@promptbook/node 0.110.0-1 → 0.110.0-10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +1785 -510
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +4 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -2
- package/esm/typings/src/_packages/openai.index.d.ts +8 -4
- package/esm/typings/src/_packages/types.index.d.ts +12 -4
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +22 -21
- package/esm/typings/src/book-2.0/agent-source/AgentReferenceResolver.d.ts +18 -0
- package/esm/typings/src/book-2.0/agent-source/CreateAgentModelRequirementsOptions.d.ts +12 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +8 -2
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.agentReferenceResolver.test.d.ts +1 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +4 -5
- package/esm/typings/src/book-components/Chat/AgentChip/AgentChip.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/Chat/ChatActionsBar.d.ts +4 -2
- package/esm/typings/src/book-components/Chat/Chat/ChatInputArea.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +4 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatMessageList.d.ts +1 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +15 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatSoundToggle.d.ts +31 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +10 -1
- package/esm/typings/src/book-components/Chat/SourceChip/SourceChip.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/utils/collectTeamToolCallSummary.d.ts +69 -0
- package/esm/typings/src/book-components/Chat/utils/getToolCallChipletInfo.d.ts +13 -13
- package/esm/typings/src/book-components/Chat/utils/parseCitationsFromContent.d.ts +9 -0
- package/esm/typings/src/book-components/Chat/utils/toolCallParsing.d.ts +4 -0
- package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +0 -3
- package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +9 -0
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/Agent.d.ts +1 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +5 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.test.d.ts +1 -0
- package/esm/typings/src/llm-providers/agent/AgentOptions.d.ts +10 -0
- package/esm/typings/src/llm-providers/agent/CreateAgentLlmExecutionToolsOptions.d.ts +13 -2
- package/esm/typings/src/llm-providers/agent/RemoteAgent.d.ts +2 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionTools.d.ts +150 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAgentKitExecutionToolsOptions.d.ts +15 -0
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionToolsOptions.d.ts +3 -4
- package/esm/typings/src/llm-providers/openai/OpenAiVectorStoreHandler.d.ts +135 -0
- package/esm/typings/src/llm-providers/openai/utils/mapToolsToOpenAi.d.ts +1 -1
- package/esm/typings/src/types/LlmToolDefinition.d.ts +1 -0
- package/esm/typings/src/types/ModelRequirements.d.ts +9 -0
- package/esm/typings/src/utils/DEFAULT_THINKING_MESSAGES.d.ts +8 -0
- package/esm/typings/src/utils/agents/resolveAgentAvatarImageUrl.d.ts +29 -0
- package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.d.ts +38 -0
- package/esm/typings/src/utils/knowledge/inlineKnowledgeSource.test.d.ts +1 -0
- package/esm/typings/src/utils/language/getBrowserPreferredSpeechRecognitionLanguage.d.ts +35 -0
- package/esm/typings/src/utils/toolCalls/getToolCallIdentity.d.ts +10 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +7 -3
- package/umd/index.umd.js +1788 -514
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/openai/OpenAiAgentExecutionTools.d.ts +0 -43
- package/esm/typings/src/llm-providers/openai/createOpenAiAgentExecutionTools.d.ts +0 -11
package/umd/index.umd.js
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
(function (global, factory) {
|
|
2
|
-
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('fs/promises'), require('path'), require('spacetrim'), require('jszip'), require('crypto'), require('crypto-js'), require('crypto-js/enc-hex'), require('@mozilla/readability'), require('jsdom'), require('showdown'), require('child_process'), require('waitasecond'), require('dotenv'), require('crypto-js/sha256'), require('rxjs'), require('moment'), require('mime-types'), require('papaparse'), require('bottleneck'), require('openai')) :
|
|
3
|
-
typeof define === 'function' && define.amd ? define(['exports', 'colors', 'fs/promises', 'path', 'spacetrim', 'jszip', 'crypto', 'crypto-js', 'crypto-js/enc-hex', '@mozilla/readability', 'jsdom', 'showdown', 'child_process', 'waitasecond', 'dotenv', 'crypto-js/sha256', 'rxjs', 'moment', 'mime-types', 'papaparse', 'bottleneck', 'openai'], factory) :
|
|
4
|
-
(global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-node"] = {}, global.colors, global.promises, global.path, global.spaceTrim$1, global.JSZip, global.crypto, global.cryptoJs, global.hexEncoder, global.readability, global.jsdom, global.showdown, global.child_process, global.waitasecond, global.dotenv, global.sha256, global.rxjs, global.moment, global.mimeTypes, global.papaparse, global.Bottleneck, global.OpenAI));
|
|
5
|
-
})(this, (function (exports, colors, promises, path, spaceTrim$1, JSZip, crypto, cryptoJs, hexEncoder, readability, jsdom, showdown, child_process, waitasecond, dotenv, sha256, rxjs, moment, mimeTypes, papaparse, Bottleneck, OpenAI) { 'use strict';
|
|
2
|
+
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('fs/promises'), require('path'), require('spacetrim'), require('jszip'), require('crypto'), require('crypto-js'), require('crypto-js/enc-hex'), require('@mozilla/readability'), require('jsdom'), require('showdown'), require('child_process'), require('waitasecond'), require('dotenv'), require('crypto-js/sha256'), require('rxjs'), require('moment'), require('mime-types'), require('papaparse'), require('@openai/agents'), require('bottleneck'), require('openai')) :
|
|
3
|
+
typeof define === 'function' && define.amd ? define(['exports', 'colors', 'fs/promises', 'path', 'spacetrim', 'jszip', 'crypto', 'crypto-js', 'crypto-js/enc-hex', '@mozilla/readability', 'jsdom', 'showdown', 'child_process', 'waitasecond', 'dotenv', 'crypto-js/sha256', 'rxjs', 'moment', 'mime-types', 'papaparse', '@openai/agents', 'bottleneck', 'openai'], factory) :
|
|
4
|
+
(global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-node"] = {}, global.colors, global.promises, global.path, global.spaceTrim$1, global.JSZip, global.crypto, global.cryptoJs, global.hexEncoder, global.readability, global.jsdom, global.showdown, global.child_process, global.waitasecond, global.dotenv, global.sha256, global.rxjs, global.moment, global.mimeTypes, global.papaparse, global.agents, global.Bottleneck, global.OpenAI));
|
|
5
|
+
})(this, (function (exports, colors, promises, path, spaceTrim$1, JSZip, crypto, cryptoJs, hexEncoder, readability, jsdom, showdown, child_process, waitasecond, dotenv, sha256, rxjs, moment, mimeTypes, papaparse, agents, Bottleneck, OpenAI) { 'use strict';
|
|
6
6
|
|
|
7
7
|
function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
|
|
8
8
|
|
|
@@ -48,7 +48,7 @@
|
|
|
48
48
|
* @generated
|
|
49
49
|
* @see https://github.com/webgptorg/promptbook
|
|
50
50
|
*/
|
|
51
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.110.0-
|
|
51
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.110.0-10';
|
|
52
52
|
/**
|
|
53
53
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
54
54
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -13723,6 +13723,28 @@
|
|
|
13723
13723
|
return currentMessage + separator + content;
|
|
13724
13724
|
});
|
|
13725
13725
|
}
|
|
13726
|
+
/**
|
|
13727
|
+
* Helper method to create a new requirements object with updated prompt suffix
|
|
13728
|
+
*/
|
|
13729
|
+
updatePromptSuffix(requirements, contentUpdate) {
|
|
13730
|
+
const newSuffix = typeof contentUpdate === 'string' ? contentUpdate : contentUpdate(requirements.promptSuffix);
|
|
13731
|
+
return {
|
|
13732
|
+
...requirements,
|
|
13733
|
+
promptSuffix: newSuffix,
|
|
13734
|
+
};
|
|
13735
|
+
}
|
|
13736
|
+
/**
|
|
13737
|
+
* Helper method to append content to the prompt suffix
|
|
13738
|
+
* Default separator is a single newline for bullet lists.
|
|
13739
|
+
*/
|
|
13740
|
+
appendToPromptSuffix(requirements, content, separator = '\n') {
|
|
13741
|
+
return this.updatePromptSuffix(requirements, (currentSuffix) => {
|
|
13742
|
+
if (!currentSuffix.trim()) {
|
|
13743
|
+
return content;
|
|
13744
|
+
}
|
|
13745
|
+
return `${currentSuffix}${separator}${content}`;
|
|
13746
|
+
});
|
|
13747
|
+
}
|
|
13726
13748
|
/**
|
|
13727
13749
|
* Helper method to add a comment section to the system message
|
|
13728
13750
|
* Comments are lines starting with # that will be removed from the final system message
|
|
@@ -13900,13 +13922,9 @@
|
|
|
13900
13922
|
`);
|
|
13901
13923
|
}
|
|
13902
13924
|
applyToAgentModelRequirements(requirements, _content) {
|
|
13903
|
-
const updatedMetadata = {
|
|
13904
|
-
...requirements.metadata,
|
|
13905
|
-
isClosed: true,
|
|
13906
|
-
};
|
|
13907
13925
|
return {
|
|
13908
13926
|
...requirements,
|
|
13909
|
-
|
|
13927
|
+
isClosed: true,
|
|
13910
13928
|
};
|
|
13911
13929
|
}
|
|
13912
13930
|
}
|
|
@@ -14184,12 +14202,12 @@
|
|
|
14184
14202
|
return requirements;
|
|
14185
14203
|
}
|
|
14186
14204
|
// Get existing dictionary entries from metadata
|
|
14187
|
-
const existingDictionary = ((_a = requirements.
|
|
14205
|
+
const existingDictionary = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.DICTIONARY) || '';
|
|
14188
14206
|
// Merge the new dictionary entry with existing entries
|
|
14189
14207
|
const mergedDictionary = existingDictionary ? `${existingDictionary}\n${trimmedContent}` : trimmedContent;
|
|
14190
14208
|
// Store the merged dictionary in metadata for debugging and inspection
|
|
14191
14209
|
const updatedMetadata = {
|
|
14192
|
-
...requirements.
|
|
14210
|
+
...requirements._metadata,
|
|
14193
14211
|
DICTIONARY: mergedDictionary,
|
|
14194
14212
|
};
|
|
14195
14213
|
// Create the dictionary section for the system message
|
|
@@ -14197,7 +14215,7 @@
|
|
|
14197
14215
|
const dictionarySection = `# DICTIONARY\n${mergedDictionary}`;
|
|
14198
14216
|
return {
|
|
14199
14217
|
...this.appendToSystemMessage(requirements, dictionarySection),
|
|
14200
|
-
|
|
14218
|
+
_metadata: updatedMetadata,
|
|
14201
14219
|
};
|
|
14202
14220
|
}
|
|
14203
14221
|
}
|
|
@@ -14337,10 +14355,7 @@
|
|
|
14337
14355
|
applyToAgentModelRequirements(requirements, content) {
|
|
14338
14356
|
const trimmedContent = content.trim();
|
|
14339
14357
|
if (!trimmedContent) {
|
|
14340
|
-
return
|
|
14341
|
-
...requirements,
|
|
14342
|
-
parentAgentUrl: undefined,
|
|
14343
|
-
};
|
|
14358
|
+
return requirements;
|
|
14344
14359
|
}
|
|
14345
14360
|
if (trimmedContent.toUpperCase() === 'VOID' ||
|
|
14346
14361
|
trimmedContent.toUpperCase() === 'NULL' ||
|
|
@@ -14554,6 +14569,136 @@
|
|
|
14554
14569
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
14555
14570
|
*/
|
|
14556
14571
|
|
|
14572
|
+
/**
|
|
14573
|
+
* @@@
|
|
14574
|
+
*
|
|
14575
|
+
* @private thing of inline knowledge
|
|
14576
|
+
*/
|
|
14577
|
+
const INLINE_KNOWLEDGE_BASE_NAME = 'inline-knowledge';
|
|
14578
|
+
/**
|
|
14579
|
+
* @@@
|
|
14580
|
+
*
|
|
14581
|
+
* @private thing of inline knowledge
|
|
14582
|
+
*/
|
|
14583
|
+
const INLINE_KNOWLEDGE_EXTENSION = '.txt';
|
|
14584
|
+
/**
|
|
14585
|
+
* @@@
|
|
14586
|
+
*
|
|
14587
|
+
* @private thing of inline knowledge
|
|
14588
|
+
*/
|
|
14589
|
+
const DATA_URL_PREFIX = 'data:';
|
|
14590
|
+
/**
|
|
14591
|
+
* @@@
|
|
14592
|
+
*
|
|
14593
|
+
* @private thing of inline knowledge
|
|
14594
|
+
*/
|
|
14595
|
+
function getFirstNonEmptyLine(content) {
|
|
14596
|
+
const lines = content.split(/\r?\n/);
|
|
14597
|
+
for (const line of lines) {
|
|
14598
|
+
const trimmed = line.trim();
|
|
14599
|
+
if (trimmed) {
|
|
14600
|
+
return trimmed;
|
|
14601
|
+
}
|
|
14602
|
+
}
|
|
14603
|
+
return null;
|
|
14604
|
+
}
|
|
14605
|
+
/**
|
|
14606
|
+
* @@@
|
|
14607
|
+
*
|
|
14608
|
+
* @private thing of inline knowledge
|
|
14609
|
+
*/
|
|
14610
|
+
function deriveBaseFilename(content) {
|
|
14611
|
+
const firstLine = getFirstNonEmptyLine(content);
|
|
14612
|
+
if (!firstLine) {
|
|
14613
|
+
return INLINE_KNOWLEDGE_BASE_NAME;
|
|
14614
|
+
}
|
|
14615
|
+
const normalized = normalizeToKebabCase(firstLine);
|
|
14616
|
+
return normalized || INLINE_KNOWLEDGE_BASE_NAME;
|
|
14617
|
+
}
|
|
14618
|
+
/**
|
|
14619
|
+
* Creates a data URL that represents the inline knowledge content as a text file.
|
|
14620
|
+
*
|
|
14621
|
+
* @private thing of inline knowledge
|
|
14622
|
+
*/
|
|
14623
|
+
function createInlineKnowledgeSourceFile(content) {
|
|
14624
|
+
const trimmedContent = content.trim();
|
|
14625
|
+
const baseName = deriveBaseFilename(trimmedContent);
|
|
14626
|
+
const filename = `${baseName}${INLINE_KNOWLEDGE_EXTENSION}`;
|
|
14627
|
+
const mimeType = 'text/plain';
|
|
14628
|
+
const base64 = Buffer.from(trimmedContent, 'utf-8').toString('base64');
|
|
14629
|
+
const encodedFilename = encodeURIComponent(filename);
|
|
14630
|
+
const url = `${DATA_URL_PREFIX}${mimeType};name=${encodedFilename};charset=utf-8;base64,${base64}`;
|
|
14631
|
+
return {
|
|
14632
|
+
filename,
|
|
14633
|
+
mimeType,
|
|
14634
|
+
url,
|
|
14635
|
+
};
|
|
14636
|
+
}
|
|
14637
|
+
/**
|
|
14638
|
+
* Checks whether the provided source string is a data URL that can be decoded.
|
|
14639
|
+
*
|
|
14640
|
+
* @private thing of inline knowledge
|
|
14641
|
+
*/
|
|
14642
|
+
function isDataUrlKnowledgeSource(source) {
|
|
14643
|
+
return typeof source === 'string' && source.startsWith(DATA_URL_PREFIX);
|
|
14644
|
+
}
|
|
14645
|
+
/**
|
|
14646
|
+
* Parses a data URL-based knowledge source into its raw buffer, filename, and MIME type.
|
|
14647
|
+
*
|
|
14648
|
+
* @private thing of inline knowledge
|
|
14649
|
+
*/
|
|
14650
|
+
function parseDataUrlKnowledgeSource(source) {
|
|
14651
|
+
if (!isDataUrlKnowledgeSource(source)) {
|
|
14652
|
+
return null;
|
|
14653
|
+
}
|
|
14654
|
+
const commaIndex = source.indexOf(',');
|
|
14655
|
+
if (commaIndex === -1) {
|
|
14656
|
+
return null;
|
|
14657
|
+
}
|
|
14658
|
+
const header = source.slice(DATA_URL_PREFIX.length, commaIndex);
|
|
14659
|
+
const payload = source.slice(commaIndex + 1);
|
|
14660
|
+
const tokens = header.split(';');
|
|
14661
|
+
const mediaType = tokens[0] || 'text/plain';
|
|
14662
|
+
let filename = `${INLINE_KNOWLEDGE_BASE_NAME}${INLINE_KNOWLEDGE_EXTENSION}`;
|
|
14663
|
+
let isBase64 = false;
|
|
14664
|
+
for (let i = 1; i < tokens.length; i++) {
|
|
14665
|
+
const token = tokens[i];
|
|
14666
|
+
if (!token) {
|
|
14667
|
+
continue;
|
|
14668
|
+
}
|
|
14669
|
+
if (token.toLowerCase() === 'base64') {
|
|
14670
|
+
isBase64 = true;
|
|
14671
|
+
continue;
|
|
14672
|
+
}
|
|
14673
|
+
const [key, value] = token.split('=');
|
|
14674
|
+
if (key === 'name' && value !== undefined) {
|
|
14675
|
+
try {
|
|
14676
|
+
filename = decodeURIComponent(value);
|
|
14677
|
+
}
|
|
14678
|
+
catch (_a) {
|
|
14679
|
+
filename = value;
|
|
14680
|
+
}
|
|
14681
|
+
}
|
|
14682
|
+
}
|
|
14683
|
+
if (!isBase64) {
|
|
14684
|
+
return null;
|
|
14685
|
+
}
|
|
14686
|
+
try {
|
|
14687
|
+
const buffer = Buffer.from(payload, 'base64');
|
|
14688
|
+
return {
|
|
14689
|
+
buffer,
|
|
14690
|
+
filename,
|
|
14691
|
+
mimeType: mediaType,
|
|
14692
|
+
};
|
|
14693
|
+
}
|
|
14694
|
+
catch (_b) {
|
|
14695
|
+
return null;
|
|
14696
|
+
}
|
|
14697
|
+
}
|
|
14698
|
+
/**
|
|
14699
|
+
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
14700
|
+
*/
|
|
14701
|
+
|
|
14557
14702
|
/**
|
|
14558
14703
|
* KNOWLEDGE commitment definition
|
|
14559
14704
|
*
|
|
@@ -14652,9 +14797,13 @@
|
|
|
14652
14797
|
return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
|
|
14653
14798
|
}
|
|
14654
14799
|
else {
|
|
14655
|
-
|
|
14656
|
-
const
|
|
14657
|
-
|
|
14800
|
+
const inlineSource = createInlineKnowledgeSourceFile(trimmedContent);
|
|
14801
|
+
const updatedRequirements = {
|
|
14802
|
+
...requirements,
|
|
14803
|
+
knowledgeSources: [...(requirements.knowledgeSources || []), inlineSource.url],
|
|
14804
|
+
};
|
|
14805
|
+
const knowledgeInfo = `Knowledge Source Inline: ${inlineSource.filename} (derived from inline content and processed for retrieval during chat)`;
|
|
14806
|
+
return this.appendToSystemMessage(updatedRequirements, knowledgeInfo, '\n\n');
|
|
14658
14807
|
}
|
|
14659
14808
|
}
|
|
14660
14809
|
}
|
|
@@ -14901,16 +15050,16 @@
|
|
|
14901
15050
|
// and typically doesn't need to be added to the system prompt or model requirements directly.
|
|
14902
15051
|
// It is extracted separately for the chat interface.
|
|
14903
15052
|
var _a;
|
|
14904
|
-
const pendingUserMessage = (_a = requirements.
|
|
15053
|
+
const pendingUserMessage = (_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.pendingUserMessage;
|
|
14905
15054
|
if (pendingUserMessage) {
|
|
14906
15055
|
const newSample = { question: pendingUserMessage, answer: content };
|
|
14907
15056
|
const newSamples = [...(requirements.samples || []), newSample];
|
|
14908
|
-
const newMetadata = { ...requirements.
|
|
15057
|
+
const newMetadata = { ...requirements._metadata };
|
|
14909
15058
|
delete newMetadata.pendingUserMessage;
|
|
14910
15059
|
return {
|
|
14911
15060
|
...requirements,
|
|
14912
15061
|
samples: newSamples,
|
|
14913
|
-
|
|
15062
|
+
_metadata: newMetadata,
|
|
14914
15063
|
};
|
|
14915
15064
|
}
|
|
14916
15065
|
return requirements;
|
|
@@ -15158,8 +15307,8 @@
|
|
|
15158
15307
|
applyToAgentModelRequirements(requirements, content) {
|
|
15159
15308
|
return {
|
|
15160
15309
|
...requirements,
|
|
15161
|
-
|
|
15162
|
-
...requirements.
|
|
15310
|
+
_metadata: {
|
|
15311
|
+
...requirements._metadata,
|
|
15163
15312
|
pendingUserMessage: content,
|
|
15164
15313
|
},
|
|
15165
15314
|
};
|
|
@@ -16017,11 +16166,7 @@
|
|
|
16017
16166
|
if (trimmedContent === '') {
|
|
16018
16167
|
return requirements;
|
|
16019
16168
|
}
|
|
16020
|
-
|
|
16021
|
-
return {
|
|
16022
|
-
...requirements,
|
|
16023
|
-
notes: [...(requirements.notes || []), trimmedContent],
|
|
16024
|
-
};
|
|
16169
|
+
return requirements;
|
|
16025
16170
|
}
|
|
16026
16171
|
}
|
|
16027
16172
|
/**
|
|
@@ -16083,12 +16228,12 @@
|
|
|
16083
16228
|
// Since OPEN is default, we can just ensure isClosed is false
|
|
16084
16229
|
// But to be explicit we can set it
|
|
16085
16230
|
const updatedMetadata = {
|
|
16086
|
-
...requirements.
|
|
16231
|
+
...requirements._metadata,
|
|
16087
16232
|
isClosed: false,
|
|
16088
16233
|
};
|
|
16089
16234
|
return {
|
|
16090
16235
|
...requirements,
|
|
16091
|
-
|
|
16236
|
+
_metadata: updatedMetadata,
|
|
16092
16237
|
};
|
|
16093
16238
|
}
|
|
16094
16239
|
}
|
|
@@ -16169,7 +16314,7 @@
|
|
|
16169
16314
|
return requirements;
|
|
16170
16315
|
}
|
|
16171
16316
|
// Get existing persona content from metadata
|
|
16172
|
-
const existingPersonaContent = ((_a = requirements.
|
|
16317
|
+
const existingPersonaContent = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.PERSONA) || '';
|
|
16173
16318
|
// Merge the new content with existing persona content
|
|
16174
16319
|
// When multiple PERSONA commitments exist, they are merged into one
|
|
16175
16320
|
const mergedPersonaContent = existingPersonaContent
|
|
@@ -16177,12 +16322,12 @@
|
|
|
16177
16322
|
: trimmedContent;
|
|
16178
16323
|
// Store the merged persona content in metadata for debugging and inspection
|
|
16179
16324
|
const updatedMetadata = {
|
|
16180
|
-
...requirements.
|
|
16325
|
+
...requirements._metadata,
|
|
16181
16326
|
PERSONA: mergedPersonaContent,
|
|
16182
16327
|
};
|
|
16183
16328
|
// Get the agent name from metadata (which should contain the first line of agent source)
|
|
16184
16329
|
// If not available, extract from current system message as fallback
|
|
16185
|
-
let agentName = (_b = requirements.
|
|
16330
|
+
let agentName = (_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.agentName;
|
|
16186
16331
|
if (!agentName) {
|
|
16187
16332
|
// Fallback: extract from current system message
|
|
16188
16333
|
const currentMessage = requirements.systemMessage.trim();
|
|
@@ -16229,7 +16374,7 @@
|
|
|
16229
16374
|
return {
|
|
16230
16375
|
...requirements,
|
|
16231
16376
|
systemMessage: newSystemMessage,
|
|
16232
|
-
|
|
16377
|
+
_metadata: updatedMetadata,
|
|
16233
16378
|
};
|
|
16234
16379
|
}
|
|
16235
16380
|
}
|
|
@@ -16312,7 +16457,16 @@
|
|
|
16312
16457
|
}
|
|
16313
16458
|
// Add rule to the system message
|
|
16314
16459
|
const ruleSection = `Rule: ${trimmedContent}`;
|
|
16315
|
-
|
|
16460
|
+
const requirementsWithRule = this.appendToSystemMessage(requirements, ruleSection, '\n\n');
|
|
16461
|
+
const ruleLines = trimmedContent
|
|
16462
|
+
.split(/\r?\n/)
|
|
16463
|
+
.map((line) => line.trim())
|
|
16464
|
+
.filter(Boolean)
|
|
16465
|
+
.map((line) => `- ${line}`);
|
|
16466
|
+
if (ruleLines.length === 0) {
|
|
16467
|
+
return requirementsWithRule;
|
|
16468
|
+
}
|
|
16469
|
+
return this.appendToPromptSuffix(requirementsWithRule, ruleLines.join('\n'));
|
|
16316
16470
|
}
|
|
16317
16471
|
}
|
|
16318
16472
|
/**
|
|
@@ -16818,7 +16972,7 @@
|
|
|
16818
16972
|
if (teammates.length === 0) {
|
|
16819
16973
|
return requirements;
|
|
16820
16974
|
}
|
|
16821
|
-
const agentName = ((_a = requirements.
|
|
16975
|
+
const agentName = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.agentName) || 'Agent';
|
|
16822
16976
|
const teamEntries = teammates.map((teammate) => ({
|
|
16823
16977
|
toolName: createTeamToolName(teammate.url),
|
|
16824
16978
|
teammate,
|
|
@@ -16858,7 +17012,7 @@
|
|
|
16858
17012
|
},
|
|
16859
17013
|
});
|
|
16860
17014
|
}
|
|
16861
|
-
const existingTeammates = ((_b = requirements.
|
|
17015
|
+
const existingTeammates = ((_b = requirements._metadata) === null || _b === void 0 ? void 0 : _b.teammates) || [];
|
|
16862
17016
|
const updatedTeammates = [...existingTeammates];
|
|
16863
17017
|
for (const entry of teamEntries) {
|
|
16864
17018
|
if (updatedTeammates.some((existing) => existing.url === entry.teammate.url)) {
|
|
@@ -16887,8 +17041,8 @@
|
|
|
16887
17041
|
return this.appendToSystemMessage({
|
|
16888
17042
|
...requirements,
|
|
16889
17043
|
tools: updatedTools,
|
|
16890
|
-
|
|
16891
|
-
...requirements.
|
|
17044
|
+
_metadata: {
|
|
17045
|
+
...requirements._metadata,
|
|
16892
17046
|
teammates: updatedTeammates,
|
|
16893
17047
|
},
|
|
16894
17048
|
}, teamSystemMessage);
|
|
@@ -16988,11 +17142,16 @@
|
|
|
16988
17142
|
const request = buildTeammateRequest(message, args.context);
|
|
16989
17143
|
let response = '';
|
|
16990
17144
|
let error = null;
|
|
17145
|
+
let toolCalls;
|
|
16991
17146
|
try {
|
|
16992
17147
|
const remoteAgent = await getRemoteTeammateAgent(entry.teammate.url);
|
|
16993
17148
|
const prompt = buildTeammatePrompt(request);
|
|
16994
17149
|
const teammateResult = await remoteAgent.callChatModel(prompt);
|
|
16995
17150
|
response = teammateResult.content || '';
|
|
17151
|
+
toolCalls =
|
|
17152
|
+
'toolCalls' in teammateResult && Array.isArray(teammateResult.toolCalls)
|
|
17153
|
+
? teammateResult.toolCalls
|
|
17154
|
+
: undefined;
|
|
16996
17155
|
}
|
|
16997
17156
|
catch (err) {
|
|
16998
17157
|
error = err instanceof Error ? err.message : String(err);
|
|
@@ -17002,6 +17161,7 @@
|
|
|
17002
17161
|
teammate: teammateMetadata,
|
|
17003
17162
|
request,
|
|
17004
17163
|
response: teammateReply,
|
|
17164
|
+
toolCalls: toolCalls && toolCalls.length > 0 ? toolCalls : undefined,
|
|
17005
17165
|
error,
|
|
17006
17166
|
conversation: [
|
|
17007
17167
|
{
|
|
@@ -17114,7 +17274,7 @@
|
|
|
17114
17274
|
if (!trimmedContent) {
|
|
17115
17275
|
// Store template mode flag in metadata
|
|
17116
17276
|
const updatedMetadata = {
|
|
17117
|
-
...requirements.
|
|
17277
|
+
...requirements._metadata,
|
|
17118
17278
|
templateMode: true,
|
|
17119
17279
|
};
|
|
17120
17280
|
// Add a general instruction about using structured templates
|
|
@@ -17124,21 +17284,21 @@
|
|
|
17124
17284
|
`);
|
|
17125
17285
|
return {
|
|
17126
17286
|
...this.appendToSystemMessage(requirements, templateModeInstruction, '\n\n'),
|
|
17127
|
-
|
|
17287
|
+
_metadata: updatedMetadata,
|
|
17128
17288
|
};
|
|
17129
17289
|
}
|
|
17130
17290
|
// If content is provided, add the specific template instructions
|
|
17131
17291
|
const templateSection = `Response Template: ${trimmedContent}`;
|
|
17132
17292
|
// Store the template in metadata for potential programmatic access
|
|
17133
|
-
const existingTemplates = ((_a = requirements.
|
|
17293
|
+
const existingTemplates = ((_a = requirements._metadata) === null || _a === void 0 ? void 0 : _a.templates) || [];
|
|
17134
17294
|
const updatedMetadata = {
|
|
17135
|
-
...requirements.
|
|
17295
|
+
...requirements._metadata,
|
|
17136
17296
|
templates: [...existingTemplates, trimmedContent],
|
|
17137
17297
|
templateMode: true,
|
|
17138
17298
|
};
|
|
17139
17299
|
return {
|
|
17140
17300
|
...this.appendToSystemMessage(requirements, templateSection, '\n\n'),
|
|
17141
|
-
|
|
17301
|
+
_metadata: updatedMetadata,
|
|
17142
17302
|
};
|
|
17143
17303
|
}
|
|
17144
17304
|
}
|
|
@@ -17475,8 +17635,8 @@
|
|
|
17475
17635
|
return this.appendToSystemMessage({
|
|
17476
17636
|
...requirements,
|
|
17477
17637
|
tools: updatedTools,
|
|
17478
|
-
|
|
17479
|
-
...requirements.
|
|
17638
|
+
_metadata: {
|
|
17639
|
+
...requirements._metadata,
|
|
17480
17640
|
useBrowser: true,
|
|
17481
17641
|
},
|
|
17482
17642
|
}, spaceTrim$1.spaceTrim(`
|
|
@@ -17705,8 +17865,8 @@
|
|
|
17705
17865
|
return this.appendToSystemMessage({
|
|
17706
17866
|
...requirements,
|
|
17707
17867
|
tools: updatedTools,
|
|
17708
|
-
|
|
17709
|
-
...requirements.
|
|
17868
|
+
_metadata: {
|
|
17869
|
+
...requirements._metadata,
|
|
17710
17870
|
useEmail: content || true,
|
|
17711
17871
|
},
|
|
17712
17872
|
}, spaceTrim$1.spaceTrim((block) => `
|
|
@@ -17841,8 +18001,8 @@
|
|
|
17841
18001
|
return this.appendToSystemMessage({
|
|
17842
18002
|
...requirements,
|
|
17843
18003
|
tools: updatedTools,
|
|
17844
|
-
|
|
17845
|
-
...requirements.
|
|
18004
|
+
_metadata: {
|
|
18005
|
+
...requirements._metadata,
|
|
17846
18006
|
useImageGenerator: content || true,
|
|
17847
18007
|
},
|
|
17848
18008
|
}, spaceTrim$1.spaceTrim(`
|
|
@@ -18133,8 +18293,8 @@
|
|
|
18133
18293
|
return this.appendToSystemMessage({
|
|
18134
18294
|
...requirements,
|
|
18135
18295
|
tools: updatedTools,
|
|
18136
|
-
|
|
18137
|
-
...requirements.
|
|
18296
|
+
_metadata: {
|
|
18297
|
+
...requirements._metadata,
|
|
18138
18298
|
useSearchEngine: content || true,
|
|
18139
18299
|
},
|
|
18140
18300
|
}, spaceTrim$1.spaceTrim((block) => `
|
|
@@ -18282,8 +18442,8 @@
|
|
|
18282
18442
|
return this.appendToSystemMessage({
|
|
18283
18443
|
...requirements,
|
|
18284
18444
|
tools: updatedTools,
|
|
18285
|
-
|
|
18286
|
-
...requirements.
|
|
18445
|
+
_metadata: {
|
|
18446
|
+
...requirements._metadata,
|
|
18287
18447
|
},
|
|
18288
18448
|
}, spaceTrim$1.spaceTrim((block) => `
|
|
18289
18449
|
Time and date context:
|
|
@@ -20317,6 +20477,40 @@
|
|
|
20317
20477
|
return toolCall.name === ASSISTANT_PREPARATION_TOOL_CALL_NAME;
|
|
20318
20478
|
}
|
|
20319
20479
|
|
|
20480
|
+
/**
|
|
20481
|
+
* Builds a stable identity string for tool calls across partial updates.
|
|
20482
|
+
*
|
|
20483
|
+
* @param toolCall - Tool call entry to identify.
|
|
20484
|
+
* @returns Stable identity string for deduplication.
|
|
20485
|
+
*
|
|
20486
|
+
* @private function of <Chat/>
|
|
20487
|
+
*/
|
|
20488
|
+
function getToolCallIdentity(toolCall) {
|
|
20489
|
+
const rawToolCall = toolCall.rawToolCall;
|
|
20490
|
+
const rawId = (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.id) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.callId) || (rawToolCall === null || rawToolCall === void 0 ? void 0 : rawToolCall.call_id);
|
|
20491
|
+
if (rawId) {
|
|
20492
|
+
return `id:${rawId}`;
|
|
20493
|
+
}
|
|
20494
|
+
if (toolCall.createdAt) {
|
|
20495
|
+
return `time:${toolCall.createdAt}:${toolCall.name}`;
|
|
20496
|
+
}
|
|
20497
|
+
const argsKey = (() => {
|
|
20498
|
+
if (typeof toolCall.arguments === 'string') {
|
|
20499
|
+
return toolCall.arguments;
|
|
20500
|
+
}
|
|
20501
|
+
if (!toolCall.arguments) {
|
|
20502
|
+
return '';
|
|
20503
|
+
}
|
|
20504
|
+
try {
|
|
20505
|
+
return JSON.stringify(toolCall.arguments);
|
|
20506
|
+
}
|
|
20507
|
+
catch (_a) {
|
|
20508
|
+
return '';
|
|
20509
|
+
}
|
|
20510
|
+
})();
|
|
20511
|
+
return `fallback:${toolCall.name}:${argsKey}`;
|
|
20512
|
+
}
|
|
20513
|
+
|
|
20320
20514
|
/*! *****************************************************************************
|
|
20321
20515
|
Copyright (c) Microsoft Corporation.
|
|
20322
20516
|
|
|
@@ -20955,11 +21149,14 @@
|
|
|
20955
21149
|
function createEmptyAgentModelRequirements() {
|
|
20956
21150
|
return {
|
|
20957
21151
|
systemMessage: '',
|
|
21152
|
+
promptSuffix: '',
|
|
20958
21153
|
// modelName: 'gpt-5',
|
|
20959
21154
|
modelName: 'gemini-2.5-flash-lite',
|
|
20960
21155
|
temperature: 0.7,
|
|
20961
21156
|
topP: 0.9,
|
|
20962
21157
|
topK: 50,
|
|
21158
|
+
parentAgentUrl: null,
|
|
21159
|
+
isClosed: false,
|
|
20963
21160
|
};
|
|
20964
21161
|
}
|
|
20965
21162
|
/**
|
|
@@ -21105,14 +21302,26 @@
|
|
|
21105
21302
|
}
|
|
21106
21303
|
|
|
21107
21304
|
/**
|
|
21108
|
-
* Creates agent model requirements using the new commitment system
|
|
21305
|
+
* Creates agent model requirements using the new commitment system.
|
|
21306
|
+
*
|
|
21109
21307
|
* This function uses a reduce-like pattern where each commitment applies its changes
|
|
21110
|
-
* to build the final requirements starting from a basic empty model
|
|
21308
|
+
* to build the final requirements starting from a basic empty model.
|
|
21111
21309
|
*
|
|
21112
|
-
* @
|
|
21310
|
+
* @param agentSource - Agent source book to parse.
|
|
21311
|
+
* @param modelName - Optional override for the agent model name.
|
|
21312
|
+
* @param options - Additional options such as the agent reference resolver.
|
|
21313
|
+
*
|
|
21314
|
+
* @private @@@
|
|
21315
|
+
*/
|
|
21316
|
+
const COMMITMENTS_WITH_AGENT_REFERENCES = new Set(['FROM', 'IMPORT', 'IMPORTS', 'TEAM']);
|
|
21317
|
+
/**
|
|
21318
|
+
* @@@
|
|
21319
|
+
*
|
|
21320
|
+
* @private @@@
|
|
21113
21321
|
*/
|
|
21114
|
-
async function createAgentModelRequirementsWithCommitments(agentSource, modelName) {
|
|
21322
|
+
async function createAgentModelRequirementsWithCommitments(agentSource, modelName, options) {
|
|
21115
21323
|
var _a;
|
|
21324
|
+
const agentReferenceResolver = options === null || options === void 0 ? void 0 : options.agentReferenceResolver;
|
|
21116
21325
|
// Parse the agent source to extract commitments
|
|
21117
21326
|
const parseResult = parseAgentSourceWithCommitments(agentSource);
|
|
21118
21327
|
// Apply DELETE filtering: remove prior commitments tagged by parameters targeted by DELETE/CANCEL/DISCARD/REMOVE
|
|
@@ -21149,8 +21358,8 @@
|
|
|
21149
21358
|
// Store the agent name in metadata so commitments can access it
|
|
21150
21359
|
requirements = {
|
|
21151
21360
|
...requirements,
|
|
21152
|
-
|
|
21153
|
-
...requirements.
|
|
21361
|
+
_metadata: {
|
|
21362
|
+
...requirements._metadata,
|
|
21154
21363
|
agentName: parseResult.agentName,
|
|
21155
21364
|
},
|
|
21156
21365
|
};
|
|
@@ -21164,6 +21373,11 @@
|
|
|
21164
21373
|
// Apply each commitment in order using reduce-like pattern
|
|
21165
21374
|
for (let i = 0; i < filteredCommitments.length; i++) {
|
|
21166
21375
|
const commitment = filteredCommitments[i];
|
|
21376
|
+
const isReferenceCommitment = Boolean(agentReferenceResolver && COMMITMENTS_WITH_AGENT_REFERENCES.has(commitment.type));
|
|
21377
|
+
let commitmentContent = commitment.content;
|
|
21378
|
+
if (isReferenceCommitment && agentReferenceResolver) {
|
|
21379
|
+
commitmentContent = await agentReferenceResolver.resolveCommitmentContent(commitment.type, commitment.content);
|
|
21380
|
+
}
|
|
21167
21381
|
// CLOSED commitment should work only if its the last commitment in the book
|
|
21168
21382
|
if (commitment.type === 'CLOSED' && i !== filteredCommitments.length - 1) {
|
|
21169
21383
|
continue;
|
|
@@ -21171,7 +21385,7 @@
|
|
|
21171
21385
|
const definition = getCommitmentDefinition(commitment.type);
|
|
21172
21386
|
if (definition) {
|
|
21173
21387
|
try {
|
|
21174
|
-
requirements = definition.applyToAgentModelRequirements(requirements,
|
|
21388
|
+
requirements = definition.applyToAgentModelRequirements(requirements, commitmentContent);
|
|
21175
21389
|
}
|
|
21176
21390
|
catch (error) {
|
|
21177
21391
|
console.warn(`Failed to apply commitment ${commitment.type}:`, error);
|
|
@@ -21319,23 +21533,28 @@
|
|
|
21319
21533
|
}
|
|
21320
21534
|
|
|
21321
21535
|
/**
 * Creates model requirements for an agent based on its source.
 *
 * There are 2 similar functions:
 * - `parseAgentSource` which is a lightweight parser for agent source, it parses basic information and its purpose is to be quick and synchronous. The commitments there are hardcoded.
 * - `createAgentModelRequirements` which is an asynchronous function that creates model requirements it applies each commitment one by one and works asynchronous.
 *
 * @param agentSource - Book describing the agent.
 * @param modelName - Optional override for the agent's model.
 * @param availableModels - Models that could fulfill the agent.
 * @param llmTools - Execution tools used when selecting a best model.
 * @param options - Optional hooks such as the agent reference resolver.
 * @public exported from `@promptbook/core`
 */
async function createAgentModelRequirements(agentSource, modelName, availableModels, llmTools, options) {
    // When no explicit model is requested but candidates and tools are available,
    // let the persona-based selector pick the best-fitting model first.
    const shouldAutoSelectModel = Boolean(availableModels) && !modelName && Boolean(llmTools);
    if (shouldAutoSelectModel) {
        const selectedModelName = await selectBestModelUsingPersona(agentSource, llmTools);
        return createAgentModelRequirementsWithCommitments(agentSource, selectedModelName, options);
    }
    // Otherwise fall through to the commitment-based system with the provided (or default) model
    return createAgentModelRequirementsWithCommitments(agentSource, modelName, options);
}
|
|
21340
21559
|
/**
|
|
21341
21560
|
* Selects the best model using the preparePersona function
|
|
@@ -21633,6 +21852,66 @@
|
|
|
21633
21852
|
},
|
|
21634
21853
|
/**/
|
|
21635
21854
|
/**/
|
|
21855
|
+
{
|
|
21856
|
+
modelVariant: 'CHAT',
|
|
21857
|
+
modelTitle: 'gpt-5.2-codex',
|
|
21858
|
+
modelName: 'gpt-5.2-codex',
|
|
21859
|
+
modelDescription: 'High-capability Codex variant tuned for agentic code generation with large contexts and reasoning effort controls. Ideal for long-horizon coding workflows and multi-step reasoning.',
|
|
21860
|
+
pricing: {
|
|
21861
|
+
prompt: pricing(`$1.75 / 1M tokens`),
|
|
21862
|
+
output: pricing(`$14.00 / 1M tokens`),
|
|
21863
|
+
},
|
|
21864
|
+
},
|
|
21865
|
+
/**/
|
|
21866
|
+
/**/
|
|
21867
|
+
{
|
|
21868
|
+
modelVariant: 'CHAT',
|
|
21869
|
+
modelTitle: 'gpt-5.1-codex-max',
|
|
21870
|
+
modelName: 'gpt-5.1-codex-max',
|
|
21871
|
+
modelDescription: 'Premium GPT-5.1 Codex flavor that mirrors gpt-5.1 in capability and pricing while adding Codex tooling optimizations.',
|
|
21872
|
+
pricing: {
|
|
21873
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
21874
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
21875
|
+
},
|
|
21876
|
+
},
|
|
21877
|
+
/**/
|
|
21878
|
+
/**/
|
|
21879
|
+
{
|
|
21880
|
+
modelVariant: 'CHAT',
|
|
21881
|
+
modelTitle: 'gpt-5.1-codex',
|
|
21882
|
+
modelName: 'gpt-5.1-codex',
|
|
21883
|
+
modelDescription: 'Core GPT-5.1 Codex model focused on agentic coding tasks with a balanced trade-off between reasoning and cost.',
|
|
21884
|
+
pricing: {
|
|
21885
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
21886
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
21887
|
+
},
|
|
21888
|
+
},
|
|
21889
|
+
/**/
|
|
21890
|
+
/**/
|
|
21891
|
+
{
|
|
21892
|
+
modelVariant: 'CHAT',
|
|
21893
|
+
modelTitle: 'gpt-5.1-codex-mini',
|
|
21894
|
+
modelName: 'gpt-5.1-codex-mini',
|
|
21895
|
+
modelDescription: 'Compact, cost-effective GPT-5.1 Codex variant with a smaller context window ideal for cheap assistant iterations that still require coding awareness.',
|
|
21896
|
+
pricing: {
|
|
21897
|
+
prompt: pricing(`$0.25 / 1M tokens`),
|
|
21898
|
+
output: pricing(`$2.00 / 1M tokens`),
|
|
21899
|
+
},
|
|
21900
|
+
},
|
|
21901
|
+
/**/
|
|
21902
|
+
/**/
|
|
21903
|
+
{
|
|
21904
|
+
modelVariant: 'CHAT',
|
|
21905
|
+
modelTitle: 'gpt-5-codex',
|
|
21906
|
+
modelName: 'gpt-5-codex',
|
|
21907
|
+
modelDescription: 'Legacy GPT-5 Codex model built for agentic coding workloads with the same pricing as GPT-5 and a focus on stability.',
|
|
21908
|
+
pricing: {
|
|
21909
|
+
prompt: pricing(`$1.25 / 1M tokens`),
|
|
21910
|
+
output: pricing(`$10.00 / 1M tokens`),
|
|
21911
|
+
},
|
|
21912
|
+
},
|
|
21913
|
+
/**/
|
|
21914
|
+
/**/
|
|
21636
21915
|
{
|
|
21637
21916
|
modelVariant: 'CHAT',
|
|
21638
21917
|
modelTitle: 'gpt-5-mini',
|
|
@@ -22337,6 +22616,32 @@
|
|
|
22337
22616
|
errorMessage.includes('does not support'));
|
|
22338
22617
|
}
|
|
22339
22618
|
|
|
22619
|
+
/**
 * Provides access to the structured clone implementation when available.
 *
 * @returns `globalThis.structuredClone`, which may be `undefined` on older runtimes.
 */
function getStructuredCloneFunction() {
    const { structuredClone } = globalThis;
    return structuredClone;
}
|
|
22625
|
+
/**
 * Checks whether the prompt is a chat prompt that carries file attachments.
 *
 * @param prompt - Prompt object to inspect.
 * @returns `true` only when the prompt has a `files` property holding an array.
 */
function hasChatPromptFiles(prompt) {
    if (!('files' in prompt)) {
        return false;
    }
    return Array.isArray(prompt.files);
}
|
|
22631
|
+
/**
 * Creates a deep copy of the prompt while keeping attached files intact when structured clone is not available.
 *
 * @param prompt - Prompt to clone; never mutated.
 * @returns A deep copy; on the JSON fallback path, `files` are re-attached by reference
 *          because a JSON round-trip would drop non-serializable File objects.
 */
function clonePromptPreservingFiles(prompt) {
    const structuredCloneImplementation = getStructuredCloneFunction();
    if (typeof structuredCloneImplementation === 'function') {
        return structuredCloneImplementation(prompt);
    }
    // Fallback: JSON round-trip, then restore the original files array by reference
    const promptCopy = JSON.parse(JSON.stringify(prompt));
    if (hasChatPromptFiles(prompt)) {
        promptCopy.files = prompt.files;
    }
    return promptCopy;
}
|
|
22340
22645
|
/**
|
|
22341
22646
|
* Execution Tools for calling OpenAI API or other OpenAI compatible provider
|
|
22342
22647
|
*
|
|
@@ -22366,16 +22671,11 @@
|
|
|
22366
22671
|
const openAiOptions = { ...this.options };
|
|
22367
22672
|
delete openAiOptions.isVerbose;
|
|
22368
22673
|
delete openAiOptions.userId;
|
|
22369
|
-
// Enhanced configuration
|
|
22674
|
+
// Enhanced configuration with retries and timeouts.
|
|
22370
22675
|
const enhancedOptions = {
|
|
22371
22676
|
...openAiOptions,
|
|
22372
22677
|
timeout: API_REQUEST_TIMEOUT,
|
|
22373
22678
|
maxRetries: CONNECTION_RETRIES_LIMIT,
|
|
22374
|
-
defaultHeaders: {
|
|
22375
|
-
Connection: 'keep-alive',
|
|
22376
|
-
'Keep-Alive': 'timeout=30, max=100',
|
|
22377
|
-
...openAiOptions.defaultHeaders,
|
|
22378
|
-
},
|
|
22379
22679
|
};
|
|
22380
22680
|
this.client = new OpenAI__default["default"](enhancedOptions);
|
|
22381
22681
|
}
|
|
@@ -22426,7 +22726,7 @@
|
|
|
22426
22726
|
*/
|
|
22427
22727
|
async callChatModelStream(prompt, onProgress) {
|
|
22428
22728
|
// Deep clone prompt and modelRequirements to avoid mutation across calls
|
|
22429
|
-
const clonedPrompt =
|
|
22729
|
+
const clonedPrompt = clonePromptPreservingFiles(prompt);
|
|
22430
22730
|
// Use local Set for retried parameters to ensure independence and thread safety
|
|
22431
22731
|
const retriedUnsupportedParameters = new Set();
|
|
22432
22732
|
return this.callChatModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters, onProgress);
|
|
@@ -22453,7 +22753,10 @@
|
|
|
22453
22753
|
// <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
|
|
22454
22754
|
// <- Note: [🧆]
|
|
22455
22755
|
}; // <- TODO: [💩] Guard here types better
|
|
22456
|
-
if (
|
|
22756
|
+
if (currentModelRequirements.responseFormat !== undefined) {
|
|
22757
|
+
modelSettings.response_format = currentModelRequirements.responseFormat;
|
|
22758
|
+
}
|
|
22759
|
+
else if (format === 'JSON') {
|
|
22457
22760
|
modelSettings.response_format = {
|
|
22458
22761
|
type: 'json_object',
|
|
22459
22762
|
};
|
|
@@ -23264,18 +23567,6 @@
|
|
|
23264
23567
|
get profile() {
|
|
23265
23568
|
return OPENAI_PROVIDER_PROFILE;
|
|
23266
23569
|
}
|
|
23267
|
-
/*
|
|
23268
|
-
Note: Commenting this out to avoid circular dependency
|
|
23269
|
-
/**
|
|
23270
|
-
* Create (sub)tools for calling OpenAI API Assistants
|
|
23271
|
-
*
|
|
23272
|
-
* @param assistantId Which assistant to use
|
|
23273
|
-
* @returns Tools for calling OpenAI API Assistants with same token
|
|
23274
|
-
* /
|
|
23275
|
-
public createAssistantSubtools(assistantId: string_token): OpenAiAssistantExecutionTools {
|
|
23276
|
-
return new OpenAiAssistantExecutionTools({ ...this.options, assistantId });
|
|
23277
|
-
}
|
|
23278
|
-
*/
|
|
23279
23570
|
/**
|
|
23280
23571
|
* List all available models (non dynamically)
|
|
23281
23572
|
*
|
|
@@ -23310,206 +23601,1259 @@
|
|
|
23310
23601
|
}
|
|
23311
23602
|
}
|
|
23312
23603
|
|
|
23604
|
+
const DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS = 30000;
|
|
23605
|
+
const DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS = 900000;
|
|
23606
|
+
const VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS = 15000;
|
|
23607
|
+
const VECTOR_STORE_STALL_LOG_THRESHOLD_MS = 30000;
|
|
23313
23608
|
/**
|
|
23314
|
-
*
|
|
23609
|
+
* Base class for OpenAI execution tools that need hosted vector stores.
|
|
23315
23610
|
*
|
|
23316
23611
|
* @public exported from `@promptbook/openai`
|
|
23317
23612
|
*/
|
|
23318
|
-
class
|
|
23319
|
-
|
|
23320
|
-
|
|
23321
|
-
|
|
23613
|
+
class OpenAiVectorStoreHandler extends OpenAiExecutionTools {
|
|
23614
|
+
/**
|
|
23615
|
+
* Returns the per-knowledge-source download timeout in milliseconds.
|
|
23616
|
+
*/
|
|
23617
|
+
getKnowledgeSourceDownloadTimeoutMs() {
|
|
23618
|
+
var _a;
|
|
23619
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceDownloadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_DOWNLOAD_TIMEOUT_MS;
|
|
23322
23620
|
}
|
|
23323
|
-
|
|
23324
|
-
|
|
23621
|
+
/**
|
|
23622
|
+
* Returns the max concurrency for knowledge source uploads.
|
|
23623
|
+
*/
|
|
23624
|
+
getKnowledgeSourceUploadMaxConcurrency() {
|
|
23625
|
+
var _a;
|
|
23626
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadMaxConcurrency) !== null && _a !== void 0 ? _a : 5;
|
|
23325
23627
|
}
|
|
23326
|
-
|
|
23327
|
-
|
|
23628
|
+
/**
|
|
23629
|
+
* Returns the polling interval in milliseconds for vector store uploads.
|
|
23630
|
+
*/
|
|
23631
|
+
getKnowledgeSourceUploadPollIntervalMs() {
|
|
23632
|
+
var _a;
|
|
23633
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadPollIntervalMs) !== null && _a !== void 0 ? _a : 5000;
|
|
23328
23634
|
}
|
|
23329
23635
|
/**
|
|
23330
|
-
*
|
|
23636
|
+
* Returns the overall upload timeout in milliseconds for vector store uploads.
|
|
23331
23637
|
*/
|
|
23332
|
-
|
|
23638
|
+
getKnowledgeSourceUploadTimeoutMs() {
|
|
23639
|
+
var _a;
|
|
23640
|
+
return (_a = this.vectorStoreOptions.knowledgeSourceUploadTimeoutMs) !== null && _a !== void 0 ? _a : DEFAULT_KNOWLEDGE_SOURCE_UPLOAD_TIMEOUT_MS;
|
|
23641
|
+
}
|
|
23642
|
+
/**
|
|
23643
|
+
* Returns true if we should continue even if vector store ingestion stalls.
|
|
23644
|
+
*/
|
|
23645
|
+
shouldContinueOnVectorStoreStall() {
|
|
23646
|
+
var _a;
|
|
23647
|
+
return (_a = this.vectorStoreOptions.shouldContinueOnVectorStoreStall) !== null && _a !== void 0 ? _a : true;
|
|
23648
|
+
}
|
|
23649
|
+
/**
|
|
23650
|
+
* Returns vector-store-specific options with extended settings.
|
|
23651
|
+
*/
|
|
23652
|
+
get vectorStoreOptions() {
|
|
23653
|
+
return this.options;
|
|
23654
|
+
}
|
|
23655
|
+
/**
|
|
23656
|
+
* Returns the OpenAI vector stores API surface, supporting stable and beta SDKs.
|
|
23657
|
+
*/
|
|
23658
|
+
getVectorStoresApi(client) {
|
|
23659
|
+
var _a, _b;
|
|
23660
|
+
const vectorStores = (_a = client.vectorStores) !== null && _a !== void 0 ? _a : (_b = client.beta) === null || _b === void 0 ? void 0 : _b.vectorStores;
|
|
23661
|
+
if (!vectorStores) {
|
|
23662
|
+
throw new Error('OpenAI client does not support vector stores. Please ensure you are using a compatible version of the OpenAI SDK with vector store support.');
|
|
23663
|
+
}
|
|
23664
|
+
return vectorStores;
|
|
23665
|
+
}
|
|
23666
|
+
/**
|
|
23667
|
+
* Downloads a knowledge source URL into a File for vector store upload.
|
|
23668
|
+
*/
|
|
23669
|
+
async downloadKnowledgeSourceFile(options) {
|
|
23670
|
+
var _a;
|
|
23671
|
+
const { source, timeoutMs, logLabel } = options;
|
|
23672
|
+
const startedAtMs = Date.now();
|
|
23673
|
+
const controller = new AbortController();
|
|
23674
|
+
const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
|
|
23333
23675
|
if (this.options.isVerbose) {
|
|
23334
|
-
console.info('
|
|
23676
|
+
console.info('[🤰]', 'Downloading knowledge source', {
|
|
23677
|
+
source,
|
|
23678
|
+
timeoutMs,
|
|
23679
|
+
logLabel,
|
|
23680
|
+
});
|
|
23335
23681
|
}
|
|
23336
|
-
|
|
23337
|
-
|
|
23338
|
-
|
|
23339
|
-
|
|
23682
|
+
try {
|
|
23683
|
+
const response = await fetch(source, { signal: controller.signal });
|
|
23684
|
+
const contentType = (_a = response.headers.get('content-type')) !== null && _a !== void 0 ? _a : undefined;
|
|
23685
|
+
if (!response.ok) {
|
|
23686
|
+
console.error('[🤰]', 'Failed to download knowledge source', {
|
|
23687
|
+
source,
|
|
23688
|
+
status: response.status,
|
|
23689
|
+
statusText: response.statusText,
|
|
23690
|
+
contentType,
|
|
23691
|
+
elapsedMs: Date.now() - startedAtMs,
|
|
23692
|
+
logLabel,
|
|
23693
|
+
});
|
|
23694
|
+
return null;
|
|
23695
|
+
}
|
|
23696
|
+
const buffer = await response.arrayBuffer();
|
|
23697
|
+
let filename = source.split('/').pop() || 'downloaded-file';
|
|
23698
|
+
try {
|
|
23699
|
+
const url = new URL(source);
|
|
23700
|
+
filename = url.pathname.split('/').pop() || filename;
|
|
23701
|
+
}
|
|
23702
|
+
catch (error) {
|
|
23703
|
+
// Keep default filename
|
|
23704
|
+
}
|
|
23705
|
+
const file = new File([buffer], filename, contentType ? { type: contentType } : undefined);
|
|
23706
|
+
const elapsedMs = Date.now() - startedAtMs;
|
|
23707
|
+
const sizeBytes = buffer.byteLength;
|
|
23708
|
+
if (this.options.isVerbose) {
|
|
23709
|
+
console.info('[🤰]', 'Downloaded knowledge source', {
|
|
23710
|
+
source,
|
|
23711
|
+
filename,
|
|
23712
|
+
sizeBytes,
|
|
23713
|
+
contentType,
|
|
23714
|
+
elapsedMs,
|
|
23715
|
+
logLabel,
|
|
23716
|
+
});
|
|
23717
|
+
}
|
|
23718
|
+
return { file, sizeBytes, filename, elapsedMs };
|
|
23340
23719
|
}
|
|
23341
|
-
|
|
23342
|
-
|
|
23343
|
-
|
|
23344
|
-
|
|
23345
|
-
|
|
23346
|
-
|
|
23347
|
-
|
|
23348
|
-
|
|
23349
|
-
|
|
23350
|
-
role: msg.sender === 'assistant' ? 'assistant' : 'user',
|
|
23351
|
-
content: msg.content,
|
|
23352
|
-
}));
|
|
23353
|
-
input.push(...previousMessages);
|
|
23720
|
+
catch (error) {
|
|
23721
|
+
assertsError(error);
|
|
23722
|
+
console.error('[🤰]', 'Error downloading knowledge source', {
|
|
23723
|
+
source,
|
|
23724
|
+
elapsedMs: Date.now() - startedAtMs,
|
|
23725
|
+
logLabel,
|
|
23726
|
+
error: serializeError(error),
|
|
23727
|
+
});
|
|
23728
|
+
return null;
|
|
23354
23729
|
}
|
|
23355
|
-
|
|
23356
|
-
|
|
23357
|
-
|
|
23358
|
-
|
|
23359
|
-
|
|
23360
|
-
|
|
23361
|
-
|
|
23362
|
-
|
|
23363
|
-
|
|
23364
|
-
|
|
23365
|
-
if (this.
|
|
23366
|
-
|
|
23367
|
-
|
|
23368
|
-
|
|
23369
|
-
|
|
23370
|
-
|
|
23730
|
+
finally {
|
|
23731
|
+
clearTimeout(timeoutId);
|
|
23732
|
+
}
|
|
23733
|
+
}
|
|
23734
|
+
/**
|
|
23735
|
+
* Logs vector store file batch diagnostics to help trace ingestion stalls or failures.
|
|
23736
|
+
*/
|
|
23737
|
+
async logVectorStoreFileBatchDiagnostics(options) {
|
|
23738
|
+
var _a, _b, _c, _d, _e;
|
|
23739
|
+
const { client, vectorStoreId, batchId, uploadedFiles, logLabel, reason } = options;
|
|
23740
|
+
if (reason === 'stalled' && !this.options.isVerbose) {
|
|
23741
|
+
return;
|
|
23742
|
+
}
|
|
23743
|
+
if (!batchId.startsWith('vsfb_')) {
|
|
23744
|
+
console.error('[🤰]', 'Vector store file batch diagnostics skipped (invalid batch id)', {
|
|
23745
|
+
vectorStoreId,
|
|
23746
|
+
batchId,
|
|
23747
|
+
reason,
|
|
23748
|
+
logLabel,
|
|
23749
|
+
});
|
|
23750
|
+
return;
|
|
23751
|
+
}
|
|
23752
|
+
const fileIdToMetadata = new Map();
|
|
23753
|
+
for (const file of uploadedFiles) {
|
|
23754
|
+
fileIdToMetadata.set(file.fileId, file);
|
|
23755
|
+
}
|
|
23756
|
+
try {
|
|
23757
|
+
const vectorStores = this.getVectorStoresApi(client);
|
|
23758
|
+
const limit = Math.min(100, Math.max(10, uploadedFiles.length));
|
|
23759
|
+
const batchFilesPage = await vectorStores.fileBatches.listFiles(batchId, {
|
|
23760
|
+
vector_store_id: vectorStoreId,
|
|
23761
|
+
limit,
|
|
23762
|
+
});
|
|
23763
|
+
const batchFiles = (_a = batchFilesPage.data) !== null && _a !== void 0 ? _a : [];
|
|
23764
|
+
const statusCounts = {
|
|
23765
|
+
in_progress: 0,
|
|
23766
|
+
completed: 0,
|
|
23767
|
+
failed: 0,
|
|
23768
|
+
cancelled: 0,
|
|
23769
|
+
};
|
|
23770
|
+
const errorSamples = [];
|
|
23771
|
+
const inProgressSamples = [];
|
|
23772
|
+
const batchFileIds = new Set();
|
|
23773
|
+
for (const file of batchFiles) {
|
|
23774
|
+
const status = (_b = file.status) !== null && _b !== void 0 ? _b : 'unknown';
|
|
23775
|
+
statusCounts[status] = ((_c = statusCounts[status]) !== null && _c !== void 0 ? _c : 0) + 1;
|
|
23776
|
+
const vectorStoreFileId = file.id;
|
|
23777
|
+
const uploadedFileId = (_d = file.file_id) !== null && _d !== void 0 ? _d : file.fileId;
|
|
23778
|
+
const fileId = uploadedFileId !== null && uploadedFileId !== void 0 ? uploadedFileId : vectorStoreFileId;
|
|
23779
|
+
batchFileIds.add(fileId);
|
|
23780
|
+
const metadata = fileIdToMetadata.get(fileId);
|
|
23781
|
+
if (status === 'failed') {
|
|
23782
|
+
errorSamples.push({
|
|
23783
|
+
fileId,
|
|
23784
|
+
status,
|
|
23785
|
+
error: (_e = file.last_error) === null || _e === void 0 ? void 0 : _e.message,
|
|
23786
|
+
filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
|
|
23787
|
+
vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
|
|
23788
|
+
});
|
|
23789
|
+
}
|
|
23790
|
+
if (status === 'in_progress') {
|
|
23791
|
+
inProgressSamples.push({
|
|
23792
|
+
fileId,
|
|
23793
|
+
filename: metadata === null || metadata === void 0 ? void 0 : metadata.filename,
|
|
23794
|
+
vectorStoreFileId: uploadedFileId ? vectorStoreFileId : undefined,
|
|
23795
|
+
});
|
|
23796
|
+
}
|
|
23797
|
+
}
|
|
23798
|
+
const missingSamples = uploadedFiles
|
|
23799
|
+
.filter((file) => !batchFileIds.has(file.fileId))
|
|
23800
|
+
.slice(0, 5)
|
|
23801
|
+
.map((file) => ({
|
|
23802
|
+
fileId: file.fileId,
|
|
23803
|
+
filename: file.filename,
|
|
23804
|
+
sizeBytes: file.sizeBytes,
|
|
23805
|
+
}));
|
|
23806
|
+
const vectorStore = await vectorStores.retrieve(vectorStoreId);
|
|
23807
|
+
const logPayload = {
|
|
23808
|
+
vectorStoreId,
|
|
23809
|
+
batchId,
|
|
23810
|
+
reason,
|
|
23811
|
+
vectorStoreStatus: vectorStore.status,
|
|
23812
|
+
vectorStoreFileCounts: vectorStore.file_counts,
|
|
23813
|
+
vectorStoreUsageBytes: vectorStore.usage_bytes,
|
|
23814
|
+
batchFileCount: batchFiles.length,
|
|
23815
|
+
statusCounts,
|
|
23816
|
+
errorSamples: errorSamples.slice(0, 5),
|
|
23817
|
+
inProgressSamples,
|
|
23818
|
+
missingFileCount: uploadedFiles.length - batchFileIds.size,
|
|
23819
|
+
missingSamples,
|
|
23820
|
+
logLabel,
|
|
23371
23821
|
};
|
|
23822
|
+
const logFunction = reason === 'stalled' ? console.info : console.error;
|
|
23823
|
+
logFunction('[🤰]', 'Vector store file batch diagnostics', logPayload);
|
|
23824
|
+
}
|
|
23825
|
+
catch (error) {
|
|
23826
|
+
assertsError(error);
|
|
23827
|
+
console.error('[🤰]', 'Vector store file batch diagnostics failed', {
|
|
23828
|
+
vectorStoreId,
|
|
23829
|
+
batchId,
|
|
23830
|
+
reason,
|
|
23831
|
+
logLabel,
|
|
23832
|
+
error: serializeError(error),
|
|
23833
|
+
});
|
|
23834
|
+
}
|
|
23835
|
+
}
|
|
23836
|
+
/**
|
|
23837
|
+
* Uploads knowledge source files to the vector store and polls until processing completes.
|
|
23838
|
+
*/
|
|
23839
|
+
async uploadKnowledgeSourceFilesToVectorStore(options) {
|
|
23840
|
+
var _a, _b, _c, _d, _e, _f;
|
|
23841
|
+
const { client, vectorStoreId, files, totalBytes, logLabel } = options;
|
|
23842
|
+
const vectorStores = this.getVectorStoresApi(client);
|
|
23843
|
+
const uploadStartedAtMs = Date.now();
|
|
23844
|
+
const maxConcurrency = Math.max(1, this.getKnowledgeSourceUploadMaxConcurrency());
|
|
23845
|
+
const pollIntervalMs = Math.max(1000, this.getKnowledgeSourceUploadPollIntervalMs());
|
|
23846
|
+
const uploadTimeoutMs = Math.max(1000, this.getKnowledgeSourceUploadTimeoutMs());
|
|
23847
|
+
if (this.options.isVerbose) {
|
|
23848
|
+
console.info('[🤰]', 'Uploading knowledge source files to OpenAI', {
|
|
23849
|
+
vectorStoreId,
|
|
23850
|
+
fileCount: files.length,
|
|
23851
|
+
totalBytes,
|
|
23852
|
+
maxConcurrency,
|
|
23853
|
+
pollIntervalMs,
|
|
23854
|
+
uploadTimeoutMs,
|
|
23855
|
+
logLabel,
|
|
23856
|
+
});
|
|
23857
|
+
}
|
|
23858
|
+
const fileTypeSummary = {};
|
|
23859
|
+
for (const file of files) {
|
|
23860
|
+
const filename = (_a = file.name) !== null && _a !== void 0 ? _a : '';
|
|
23861
|
+
const extension = filename.includes('.')
|
|
23862
|
+
? (_c = (_b = filename.split('.').pop()) === null || _b === void 0 ? void 0 : _b.toLowerCase()) !== null && _c !== void 0 ? _c : 'unknown'
|
|
23863
|
+
: 'unknown';
|
|
23864
|
+
const sizeBytes = typeof file.size === 'number' ? file.size : 0;
|
|
23865
|
+
const summary = (_d = fileTypeSummary[extension]) !== null && _d !== void 0 ? _d : { count: 0, totalBytes: 0 };
|
|
23866
|
+
summary.count += 1;
|
|
23867
|
+
summary.totalBytes += sizeBytes;
|
|
23868
|
+
fileTypeSummary[extension] = summary;
|
|
23869
|
+
}
|
|
23870
|
+
if (this.options.isVerbose) {
|
|
23871
|
+
console.info('[🤰]', 'Knowledge source file summary', {
|
|
23872
|
+
vectorStoreId,
|
|
23873
|
+
fileCount: files.length,
|
|
23874
|
+
totalBytes,
|
|
23875
|
+
fileTypeSummary,
|
|
23876
|
+
logLabel,
|
|
23877
|
+
});
|
|
23878
|
+
}
|
|
23879
|
+
const fileEntries = files.map((file, index) => ({ file, index }));
|
|
23880
|
+
const fileIterator = fileEntries.values();
|
|
23881
|
+
const fileIds = [];
|
|
23882
|
+
const uploadedFiles = [];
|
|
23883
|
+
const failedUploads = [];
|
|
23884
|
+
let uploadedCount = 0;
|
|
23885
|
+
const processFiles = async (iterator) => {
|
|
23886
|
+
var _a, _b;
|
|
23887
|
+
for (const { file, index } of iterator) {
|
|
23888
|
+
const uploadIndex = index + 1;
|
|
23889
|
+
const filename = file.name || `knowledge-source-${uploadIndex}`;
|
|
23890
|
+
const extension = filename.includes('.')
|
|
23891
|
+
? (_b = (_a = filename.split('.').pop()) === null || _a === void 0 ? void 0 : _a.toLowerCase()) !== null && _b !== void 0 ? _b : 'unknown'
|
|
23892
|
+
: 'unknown';
|
|
23893
|
+
const sizeBytes = typeof file.size === 'number' ? file.size : undefined;
|
|
23894
|
+
const fileUploadStartedAtMs = Date.now();
|
|
23895
|
+
if (this.options.isVerbose) {
|
|
23896
|
+
console.info('[🤰]', 'Uploading knowledge source file', {
|
|
23897
|
+
index: uploadIndex,
|
|
23898
|
+
total: files.length,
|
|
23899
|
+
filename,
|
|
23900
|
+
extension,
|
|
23901
|
+
sizeBytes,
|
|
23902
|
+
logLabel,
|
|
23903
|
+
});
|
|
23904
|
+
}
|
|
23905
|
+
try {
|
|
23906
|
+
const uploaded = await client.files.create({ file, purpose: 'assistants' });
|
|
23907
|
+
fileIds.push(uploaded.id);
|
|
23908
|
+
uploadedFiles.push({ fileId: uploaded.id, filename, sizeBytes });
|
|
23909
|
+
uploadedCount += 1;
|
|
23910
|
+
if (this.options.isVerbose) {
|
|
23911
|
+
console.info('[🤰]', 'Uploaded knowledge source file', {
|
|
23912
|
+
index: uploadIndex,
|
|
23913
|
+
total: files.length,
|
|
23914
|
+
filename,
|
|
23915
|
+
sizeBytes,
|
|
23916
|
+
fileId: uploaded.id,
|
|
23917
|
+
elapsedMs: Date.now() - fileUploadStartedAtMs,
|
|
23918
|
+
logLabel,
|
|
23919
|
+
});
|
|
23920
|
+
}
|
|
23921
|
+
}
|
|
23922
|
+
catch (error) {
|
|
23923
|
+
assertsError(error);
|
|
23924
|
+
const serializedError = serializeError(error);
|
|
23925
|
+
failedUploads.push({ index: uploadIndex, filename, error: serializedError });
|
|
23926
|
+
console.error('[🤰]', 'Failed to upload knowledge source file', {
|
|
23927
|
+
index: uploadIndex,
|
|
23928
|
+
total: files.length,
|
|
23929
|
+
filename,
|
|
23930
|
+
sizeBytes,
|
|
23931
|
+
elapsedMs: Date.now() - fileUploadStartedAtMs,
|
|
23932
|
+
logLabel,
|
|
23933
|
+
error: serializedError,
|
|
23934
|
+
});
|
|
23935
|
+
}
|
|
23936
|
+
}
|
|
23937
|
+
};
|
|
23938
|
+
const workerCount = Math.min(maxConcurrency, files.length);
|
|
23939
|
+
const workers = Array.from({ length: workerCount }, () => processFiles(fileIterator));
|
|
23940
|
+
await Promise.all(workers);
|
|
23941
|
+
if (this.options.isVerbose) {
|
|
23942
|
+
console.info('[🤰]', 'Finished uploading knowledge source files', {
|
|
23943
|
+
vectorStoreId,
|
|
23944
|
+
fileCount: files.length,
|
|
23945
|
+
uploadedCount,
|
|
23946
|
+
failedCount: failedUploads.length,
|
|
23947
|
+
elapsedMs: Date.now() - uploadStartedAtMs,
|
|
23948
|
+
failedSamples: failedUploads.slice(0, 3),
|
|
23949
|
+
logLabel,
|
|
23950
|
+
});
|
|
23951
|
+
}
|
|
23952
|
+
if (fileIds.length === 0) {
|
|
23953
|
+
console.error('[🤰]', 'No knowledge source files were uploaded', {
|
|
23954
|
+
vectorStoreId,
|
|
23955
|
+
fileCount: files.length,
|
|
23956
|
+
failedCount: failedUploads.length,
|
|
23957
|
+
logLabel,
|
|
23958
|
+
});
|
|
23959
|
+
return null;
|
|
23960
|
+
}
|
|
23961
|
+
const batch = await vectorStores.fileBatches.create(vectorStoreId, {
|
|
23962
|
+
file_ids: fileIds,
|
|
23963
|
+
});
|
|
23964
|
+
const expectedBatchId = batch.id;
|
|
23965
|
+
const expectedBatchIdValid = expectedBatchId.startsWith('vsfb_');
|
|
23966
|
+
if (!expectedBatchIdValid) {
|
|
23967
|
+
console.error('[🤰]', 'Vector store file batch id looks invalid', {
|
|
23968
|
+
vectorStoreId,
|
|
23969
|
+
batchId: expectedBatchId,
|
|
23970
|
+
batchVectorStoreId: batch.vector_store_id,
|
|
23971
|
+
logLabel,
|
|
23972
|
+
});
|
|
23973
|
+
}
|
|
23974
|
+
else if (batch.vector_store_id !== vectorStoreId) {
|
|
23975
|
+
console.error('[🤰]', 'Vector store file batch vector store id mismatch', {
|
|
23976
|
+
vectorStoreId,
|
|
23977
|
+
batchId: expectedBatchId,
|
|
23978
|
+
batchVectorStoreId: batch.vector_store_id,
|
|
23979
|
+
logLabel,
|
|
23980
|
+
});
|
|
23981
|
+
}
|
|
23982
|
+
if (this.options.isVerbose) {
|
|
23983
|
+
console.info('[🤰]', 'Created vector store file batch', {
|
|
23984
|
+
vectorStoreId,
|
|
23985
|
+
batchId: expectedBatchId,
|
|
23986
|
+
fileCount: fileIds.length,
|
|
23987
|
+
logLabel,
|
|
23988
|
+
});
|
|
23989
|
+
}
|
|
23990
|
+
const pollStartedAtMs = Date.now();
|
|
23991
|
+
const progressLogIntervalMs = Math.max(VECTOR_STORE_PROGRESS_LOG_INTERVAL_MIN_MS, pollIntervalMs);
|
|
23992
|
+
const diagnosticsIntervalMs = Math.max(60000, pollIntervalMs * 5);
|
|
23993
|
+
// let lastStatus: string | undefined;
|
|
23994
|
+
let lastCountsKey = '';
|
|
23995
|
+
let lastProgressKey = '';
|
|
23996
|
+
let lastLogAtMs = 0;
|
|
23997
|
+
let lastProgressAtMs = pollStartedAtMs;
|
|
23998
|
+
let lastDiagnosticsAtMs = pollStartedAtMs;
|
|
23999
|
+
let latestBatch = batch;
|
|
24000
|
+
let loggedBatchIdMismatch = false;
|
|
24001
|
+
let loggedBatchIdFallback = false;
|
|
24002
|
+
let loggedBatchIdInvalid = false;
|
|
24003
|
+
let shouldPoll = true;
|
|
24004
|
+
while (shouldPoll) {
|
|
24005
|
+
const nowMs = Date.now();
|
|
24006
|
+
// [🤰] Note: Sometimes OpenAI returns Vector Store object instead of Batch object, or IDs get swapped.
|
|
24007
|
+
const rawBatchId = typeof latestBatch.id === 'string' ? latestBatch.id : '';
|
|
24008
|
+
const rawVectorStoreId = latestBatch.vector_store_id;
|
|
24009
|
+
let returnedBatchId = rawBatchId;
|
|
24010
|
+
let returnedBatchIdValid = typeof returnedBatchId === 'string' && returnedBatchId.startsWith('vsfb_');
|
|
24011
|
+
if (!returnedBatchIdValid && expectedBatchIdValid) {
|
|
24012
|
+
if (!loggedBatchIdFallback) {
|
|
24013
|
+
console.error('[🤰]', 'Vector store file batch id missing from response; falling back to expected', {
|
|
24014
|
+
vectorStoreId,
|
|
24015
|
+
expectedBatchId,
|
|
24016
|
+
returnedBatchId,
|
|
24017
|
+
rawVectorStoreId,
|
|
24018
|
+
logLabel,
|
|
24019
|
+
});
|
|
24020
|
+
loggedBatchIdFallback = true;
|
|
24021
|
+
}
|
|
24022
|
+
returnedBatchId = expectedBatchId;
|
|
24023
|
+
returnedBatchIdValid = true;
|
|
24024
|
+
}
|
|
24025
|
+
if (!returnedBatchIdValid && !loggedBatchIdInvalid) {
|
|
24026
|
+
console.error('[🤰]', 'Vector store file batch id is invalid; stopping polling', {
|
|
24027
|
+
vectorStoreId,
|
|
24028
|
+
expectedBatchId,
|
|
24029
|
+
returnedBatchId,
|
|
24030
|
+
rawVectorStoreId,
|
|
24031
|
+
logLabel,
|
|
24032
|
+
});
|
|
24033
|
+
loggedBatchIdInvalid = true;
|
|
24034
|
+
}
|
|
24035
|
+
const batchIdMismatch = expectedBatchIdValid && returnedBatchIdValid && returnedBatchId !== expectedBatchId;
|
|
24036
|
+
if (batchIdMismatch && !loggedBatchIdMismatch) {
|
|
24037
|
+
console.error('[🤰]', 'Vector store file batch id mismatch', {
|
|
24038
|
+
vectorStoreId,
|
|
24039
|
+
expectedBatchId,
|
|
24040
|
+
returnedBatchId,
|
|
24041
|
+
logLabel,
|
|
24042
|
+
});
|
|
24043
|
+
loggedBatchIdMismatch = true;
|
|
24044
|
+
}
|
|
24045
|
+
if (returnedBatchIdValid) {
|
|
24046
|
+
latestBatch = await vectorStores.fileBatches.retrieve(returnedBatchId, {
|
|
24047
|
+
vector_store_id: vectorStoreId,
|
|
24048
|
+
});
|
|
24049
|
+
}
|
|
24050
|
+
else {
|
|
24051
|
+
shouldPoll = false;
|
|
24052
|
+
continue;
|
|
24053
|
+
}
|
|
24054
|
+
const status = (_e = latestBatch.status) !== null && _e !== void 0 ? _e : 'unknown';
|
|
24055
|
+
const fileCounts = (_f = latestBatch.file_counts) !== null && _f !== void 0 ? _f : {};
|
|
24056
|
+
const progressKey = JSON.stringify(fileCounts);
|
|
24057
|
+
const statusCountsKey = `${status}-${progressKey}`;
|
|
24058
|
+
const isProgressing = progressKey !== lastProgressKey;
|
|
24059
|
+
if (isProgressing) {
|
|
24060
|
+
lastProgressAtMs = nowMs;
|
|
24061
|
+
lastProgressKey = progressKey;
|
|
24062
|
+
}
|
|
24063
|
+
if (this.options.isVerbose &&
|
|
24064
|
+
(statusCountsKey !== lastCountsKey || nowMs - lastLogAtMs >= progressLogIntervalMs)) {
|
|
24065
|
+
console.info('[🤰]', 'Vector store file batch status', {
|
|
24066
|
+
vectorStoreId,
|
|
24067
|
+
batchId: returnedBatchId,
|
|
24068
|
+
status,
|
|
24069
|
+
fileCounts,
|
|
24070
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
24071
|
+
logLabel,
|
|
24072
|
+
});
|
|
24073
|
+
lastCountsKey = statusCountsKey;
|
|
24074
|
+
lastLogAtMs = nowMs;
|
|
24075
|
+
}
|
|
24076
|
+
if (status === 'in_progress' &&
|
|
24077
|
+
nowMs - lastProgressAtMs >= VECTOR_STORE_STALL_LOG_THRESHOLD_MS &&
|
|
24078
|
+
nowMs - lastDiagnosticsAtMs >= diagnosticsIntervalMs) {
|
|
24079
|
+
lastDiagnosticsAtMs = nowMs;
|
|
24080
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
24081
|
+
client,
|
|
24082
|
+
vectorStoreId,
|
|
24083
|
+
batchId: returnedBatchId,
|
|
24084
|
+
uploadedFiles,
|
|
24085
|
+
logLabel,
|
|
24086
|
+
reason: 'stalled',
|
|
24087
|
+
});
|
|
24088
|
+
}
|
|
24089
|
+
if (status === 'completed') {
|
|
24090
|
+
if (this.options.isVerbose) {
|
|
24091
|
+
console.info('[🤰]', 'Vector store file batch completed', {
|
|
24092
|
+
vectorStoreId,
|
|
24093
|
+
batchId: returnedBatchId,
|
|
24094
|
+
fileCounts,
|
|
24095
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
24096
|
+
logLabel,
|
|
24097
|
+
});
|
|
24098
|
+
}
|
|
24099
|
+
shouldPoll = false;
|
|
24100
|
+
continue;
|
|
24101
|
+
}
|
|
24102
|
+
if (status === 'failed') {
|
|
24103
|
+
console.error('[🤰]', 'Vector store file batch completed with failures', {
|
|
24104
|
+
vectorStoreId,
|
|
24105
|
+
batchId: returnedBatchId,
|
|
24106
|
+
fileCounts,
|
|
24107
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
24108
|
+
logLabel,
|
|
24109
|
+
});
|
|
24110
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
24111
|
+
client,
|
|
24112
|
+
vectorStoreId,
|
|
24113
|
+
batchId: returnedBatchId,
|
|
24114
|
+
uploadedFiles,
|
|
24115
|
+
logLabel,
|
|
24116
|
+
reason: 'failed',
|
|
24117
|
+
});
|
|
24118
|
+
shouldPoll = false;
|
|
24119
|
+
continue;
|
|
24120
|
+
}
|
|
24121
|
+
if (status === 'cancelled') {
|
|
24122
|
+
console.error('[🤰]', 'Vector store file batch did not complete', {
|
|
24123
|
+
vectorStoreId,
|
|
24124
|
+
batchId: returnedBatchId,
|
|
24125
|
+
status,
|
|
24126
|
+
fileCounts,
|
|
24127
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
24128
|
+
logLabel,
|
|
24129
|
+
});
|
|
24130
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
24131
|
+
client,
|
|
24132
|
+
vectorStoreId,
|
|
24133
|
+
batchId: returnedBatchId,
|
|
24134
|
+
uploadedFiles,
|
|
24135
|
+
logLabel,
|
|
24136
|
+
reason: 'failed',
|
|
24137
|
+
});
|
|
24138
|
+
shouldPoll = false;
|
|
24139
|
+
continue;
|
|
24140
|
+
}
|
|
24141
|
+
if (nowMs - pollStartedAtMs >= uploadTimeoutMs) {
|
|
24142
|
+
console.error('[🤰]', 'Timed out waiting for vector store file batch', {
|
|
24143
|
+
vectorStoreId,
|
|
24144
|
+
batchId: returnedBatchId,
|
|
24145
|
+
fileCounts,
|
|
24146
|
+
elapsedMs: nowMs - pollStartedAtMs,
|
|
24147
|
+
uploadTimeoutMs,
|
|
24148
|
+
logLabel,
|
|
24149
|
+
});
|
|
24150
|
+
await this.logVectorStoreFileBatchDiagnostics({
|
|
24151
|
+
client,
|
|
24152
|
+
vectorStoreId,
|
|
24153
|
+
batchId: returnedBatchId,
|
|
24154
|
+
uploadedFiles,
|
|
24155
|
+
logLabel,
|
|
24156
|
+
reason: 'timeout',
|
|
24157
|
+
});
|
|
24158
|
+
if (this.shouldContinueOnVectorStoreStall()) {
|
|
24159
|
+
console.warn('[🤰]', 'Continuing despite vector store timeout as requested', {
|
|
24160
|
+
vectorStoreId,
|
|
24161
|
+
logLabel,
|
|
24162
|
+
});
|
|
24163
|
+
shouldPoll = false;
|
|
24164
|
+
continue;
|
|
24165
|
+
}
|
|
24166
|
+
try {
|
|
24167
|
+
const cancelBatchId = batchIdMismatch && returnedBatchId.startsWith('vsfb_') ? returnedBatchId : expectedBatchId;
|
|
24168
|
+
if (!cancelBatchId.startsWith('vsfb_')) {
|
|
24169
|
+
console.error('[🤰]', 'Skipping vector store file batch cancel (invalid batch id)', {
|
|
24170
|
+
vectorStoreId,
|
|
24171
|
+
batchId: cancelBatchId,
|
|
24172
|
+
logLabel,
|
|
24173
|
+
});
|
|
24174
|
+
}
|
|
24175
|
+
else {
|
|
24176
|
+
await vectorStores.fileBatches.cancel(cancelBatchId, {
|
|
24177
|
+
vector_store_id: vectorStoreId,
|
|
24178
|
+
});
|
|
24179
|
+
}
|
|
24180
|
+
if (this.options.isVerbose) {
|
|
24181
|
+
console.info('[🤰]', 'Cancelled vector store file batch after timeout', {
|
|
24182
|
+
vectorStoreId,
|
|
24183
|
+
batchId: batchIdMismatch && returnedBatchId.startsWith('vsfb_')
|
|
24184
|
+
? returnedBatchId
|
|
24185
|
+
: expectedBatchId,
|
|
24186
|
+
...(batchIdMismatch ? { returnedBatchId } : {}),
|
|
24187
|
+
logLabel,
|
|
24188
|
+
});
|
|
24189
|
+
}
|
|
24190
|
+
}
|
|
24191
|
+
catch (error) {
|
|
24192
|
+
assertsError(error);
|
|
24193
|
+
console.error('[🤰]', 'Failed to cancel vector store file batch after timeout', {
|
|
24194
|
+
vectorStoreId,
|
|
24195
|
+
batchId: expectedBatchId,
|
|
24196
|
+
...(batchIdMismatch ? { returnedBatchId } : {}),
|
|
24197
|
+
logLabel,
|
|
24198
|
+
error: serializeError(error),
|
|
24199
|
+
});
|
|
24200
|
+
}
|
|
24201
|
+
shouldPoll = false;
|
|
24202
|
+
continue;
|
|
24203
|
+
}
|
|
24204
|
+
await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
|
|
24205
|
+
}
|
|
24206
|
+
return latestBatch;
|
|
24207
|
+
}
|
|
24208
|
+
/**
 * Creates a vector store and uploads knowledge sources, returning its ID.
 *
 * Each knowledge source is classified as a data URL, an HTTP(S) URL, or a local
 * file; data URLs are decoded in-process, HTTP(S) URLs are downloaded with a
 * timeout, and anything else is skipped (local-file support is a TODO, see the
 * commented block below). Failures of individual sources never abort the whole
 * run — they are recorded in `skippedSources` and the method continues.
 *
 * @param options - `{ client, name, knowledgeSources, logLabel }`
 * @returns Summary object: `{ vectorStoreId, uploadedFileCount, skippedCount, totalBytes }`
 */
async createVectorStoreWithKnowledgeSources(options) {
    const { client, name, knowledgeSources, logLabel } = options;
    const vectorStores = this.getVectorStoresApi(client);
    const knowledgeSourcesCount = knowledgeSources.length;
    const downloadTimeoutMs = this.getKnowledgeSourceDownloadTimeoutMs();
    if (this.options.isVerbose) {
        console.info('[🤰]', 'Creating vector store with knowledge sources', {
            name,
            knowledgeSourcesCount,
            downloadTimeoutMs,
            logLabel,
        });
    }
    const vectorStore = await vectorStores.create({
        name: `${name} Knowledge Base`,
    });
    const vectorStoreId = vectorStore.id;
    if (this.options.isVerbose) {
        console.info('[🤰]', 'Vector store created', {
            vectorStoreId,
            logLabel,
        });
    }
    // `fileStreams` collects `File` objects ready for upload; `skippedSources`
    // records every source that could not be turned into a file, with a reason.
    const fileStreams = [];
    const skippedSources = [];
    let totalBytes = 0;
    const processingStartedAtMs = Date.now();
    for (const [index, source] of knowledgeSources.entries()) {
        try {
            const isDataUrl = isDataUrlKnowledgeSource(source);
            const isHttp = source.startsWith('http://') || source.startsWith('https://');
            const sourceType = isDataUrl ? 'data_url' : isHttp ? 'url' : 'file';
            if (this.options.isVerbose) {
                console.info('[🤰]', 'Processing knowledge source', {
                    index: index + 1,
                    total: knowledgeSourcesCount,
                    source,
                    sourceType,
                    logLabel,
                });
            }
            if (isDataUrl) {
                // Data URLs are decoded locally — no network involved.
                const parsed = parseDataUrlKnowledgeSource(source);
                if (!parsed) {
                    skippedSources.push({ source, reason: 'invalid_data_url' });
                    if (this.options.isVerbose) {
                        console.info('[🤰]', 'Skipping knowledge source (invalid data URL)', {
                            source,
                            sourceType,
                            logLabel,
                        });
                    }
                    continue;
                }
                // NOTE(review): relies on the global `File` constructor (Node 20+ / browsers) — TODO confirm runtime floor.
                const dataUrlFile = new File([parsed.buffer], parsed.filename, {
                    type: parsed.mimeType,
                });
                fileStreams.push(dataUrlFile);
                totalBytes += parsed.buffer.length;
                continue;
            }
            if (isHttp) {
                // Remote sources are fetched with a timeout; a nullish result means the download failed.
                const downloadResult = await this.downloadKnowledgeSourceFile({
                    source,
                    timeoutMs: downloadTimeoutMs,
                    logLabel,
                });
                if (downloadResult) {
                    fileStreams.push(downloadResult.file);
                    totalBytes += downloadResult.sizeBytes;
                }
                else {
                    skippedSources.push({ source, reason: 'download_failed' });
                }
            }
            else {
                // Local file paths are not supported yet — see the TODO below.
                skippedSources.push({ source, reason: 'unsupported_source_type' });
                if (this.options.isVerbose) {
                    console.info('[🤰]', 'Skipping knowledge source (unsupported type)', {
                        source,
                        sourceType,
                        logLabel,
                    });
                }
                /*
                TODO: [🤰] Resolve problem with browser environment
                // Assume it's a local file path
                // Note: This will work in Node.js environment
                // For browser environments, this would need different handling
                const fs = await import('fs');
                const fileStream = fs.createReadStream(source);
                fileStreams.push(fileStream);
                */
            }
        }
        catch (error) {
            // A single bad source must not abort processing of the remaining ones.
            assertsError(error);
            skippedSources.push({ source, reason: 'processing_error' });
            console.error('[🤰]', 'Error processing knowledge source', {
                source,
                logLabel,
                error: serializeError(error),
            });
        }
    }
    if (this.options.isVerbose) {
        console.info('[🤰]', 'Finished processing knowledge sources', {
            total: knowledgeSourcesCount,
            downloadedCount: fileStreams.length,
            skippedCount: skippedSources.length,
            totalBytes,
            elapsedMs: Date.now() - processingStartedAtMs,
            skippedSamples: skippedSources.slice(0, 3),
            logLabel,
        });
    }
    if (fileStreams.length > 0) {
        if (this.options.isVerbose) {
            console.info('[🤰]', 'Uploading files to vector store', {
                vectorStoreId,
                fileCount: fileStreams.length,
                totalBytes,
                maxConcurrency: this.getKnowledgeSourceUploadMaxConcurrency(),
                pollIntervalMs: this.getKnowledgeSourceUploadPollIntervalMs(),
                uploadTimeoutMs: this.getKnowledgeSourceUploadTimeoutMs(),
                logLabel,
            });
        }
        try {
            await this.uploadKnowledgeSourceFilesToVectorStore({
                client,
                vectorStoreId,
                files: fileStreams,
                totalBytes,
                logLabel,
            });
        }
        catch (error) {
            // Upload failures are logged but deliberately not rethrown: the vector
            // store ID is still returned so the caller can proceed (best-effort).
            assertsError(error);
            console.error('[🤰]', 'Error uploading files to vector store', {
                vectorStoreId,
                logLabel,
                error: serializeError(error),
            });
        }
    }
    else if (this.options.isVerbose) {
        console.info('[🤰]', 'No knowledge source files to upload', {
            vectorStoreId,
            skippedCount: skippedSources.length,
            logLabel,
        });
    }
    return {
        vectorStoreId,
        uploadedFileCount: fileStreams.length,
        skippedCount: skippedSources.length,
        totalBytes,
    };
}
|
|
24371
|
+
}
|
|
24372
|
+
|
|
24373
|
+
// Model used for AgentKit agents when `options.agentKitModelName` is not supplied.
const DEFAULT_AGENT_KIT_MODEL_NAME = 'gpt-5.2';
// Fallback `name` for `json_schema` response formats that omit one (see `buildJsonSchemaDefinition`).
const DEFAULT_JSON_SCHEMA_NAME = 'StructuredOutput';
/*
TODO: Use or remove
const EMPTY_JSON_SCHEMA: JsonSchemaDefinition['schema'] = {
    type: 'object',
    properties: {},
    required: [],
    additionalProperties: true,
};
*/
|
24384
|
+
/**
 * Normalizes an OpenAI `json_schema` response-format payload into the AgentKit
 * output-type shape (`{ type: 'json_schema', name, strict, schema }`).
 *
 * @param jsonSchema - Optional payload with `name`, `strict` and `schema` fields;
 *   any of them (or the whole argument) may be missing.
 * @returns A fully-populated definition: missing `name` falls back to
 *   `DEFAULT_JSON_SCHEMA_NAME`, missing `properties`/`required` become empty,
 *   and `additionalProperties` defaults to `true` only when it is `undefined`
 *   (an explicit `null` is coerced to `false`).
 */
function buildJsonSchemaDefinition(jsonSchema) {
    const definition = jsonSchema == null ? {} : jsonSchema;
    const innerSchema = definition.schema == null ? {} : definition.schema;
    // Only a literally-undefined `additionalProperties` gets the permissive default;
    // every other value (including null) is coerced to a boolean.
    const allowExtraKeys =
        innerSchema.additionalProperties === undefined ? true : Boolean(innerSchema.additionalProperties);
    // NOTE(review): `strict: true` combined with `additionalProperties: true` may be
    // rejected by OpenAI strict structured outputs — TODO confirm against the API.
    return {
        type: 'json_schema',
        name: definition.name == null ? DEFAULT_JSON_SCHEMA_NAME : definition.name,
        strict: Boolean(definition.strict),
        schema: {
            type: 'object',
            properties: innerSchema.properties == null ? {} : innerSchema.properties,
            required: Array.isArray(innerSchema.required) ? innerSchema.required : [],
            additionalProperties: allowExtraKeys,
            description: innerSchema.description,
        },
    };
}
|
|
24400
|
+
/**
 * Maps OpenAI `response_format` payloads to AgentKit output types so the runner can forward
 * structured-output preferences to OpenAI while still reusing the same AgentKit agent instance.
 *
 * @param responseFormat - The OpenAI `response_format` payload from the user request;
 *   may be a string shorthand, an object with a `type` field, or absent/falsy.
 * @returns An Agent output type compatible with the requested schema or `undefined` when no impact is required.
 * @private utility of Open AI
 */
function mapResponseFormatToAgentOutputType(responseFormat) {
    // Any falsy payload (undefined, null, '', 0, false) means "no preference".
    if (!responseFormat) {
        return undefined;
    }
    // String shorthand: JSON-flavored values get an empty schema definition,
    // everything else (including unrecognized strings) is treated as plain text.
    if (typeof responseFormat === 'string') {
        const wantsJson = responseFormat === 'json_schema' || responseFormat === 'json_object';
        return wantsJson ? buildJsonSchemaDefinition() : 'text';
    }
    const formatType = responseFormat.type;
    if (formatType === 'text') {
        return 'text';
    }
    if (formatType === 'json_schema') {
        return buildJsonSchemaDefinition(responseFormat.json_schema);
    }
    if (formatType === 'json_object') {
        return buildJsonSchemaDefinition();
    }
    // Unknown object types are ignored rather than guessed at.
    return undefined;
}
|
|
24432
|
+
/**
|
|
24433
|
+
* Execution tools for OpenAI AgentKit (Agents SDK).
|
|
24434
|
+
*
|
|
24435
|
+
* @public exported from `@promptbook/openai`
|
|
24436
|
+
*/
|
|
24437
|
+
class OpenAiAgentKitExecutionTools extends OpenAiVectorStoreHandler {
|
|
24438
|
+
/**
|
|
24439
|
+
* Creates OpenAI AgentKit execution tools.
|
|
24440
|
+
*/
|
|
24441
|
+
constructor(options) {
|
|
24442
|
+
var _a;
|
|
24443
|
+
if (options.isProxied) {
|
|
24444
|
+
throw new NotYetImplementedError(`Proxy mode is not yet implemented for OpenAI AgentKit`);
|
|
24445
|
+
}
|
|
24446
|
+
super(options);
|
|
24447
|
+
this.preparedAgentKitAgent = null;
|
|
24448
|
+
this.agentKitModelName = (_a = options.agentKitModelName) !== null && _a !== void 0 ? _a : DEFAULT_AGENT_KIT_MODEL_NAME;
|
|
24449
|
+
}
|
|
24450
|
+
/**
 * Human-readable title of these execution tools.
 */
get title() {
    return 'OpenAI AgentKit';
}
|
|
24453
|
+
/**
 * Short human-readable description of what these execution tools do.
 */
get description() {
    return 'Use OpenAI AgentKit for agent-style chat with tools and knowledge';
}
|
|
24456
|
+
/**
 * Calls OpenAI AgentKit with a chat prompt (non-streaming).
 *
 * Delegates to `callChatModelStream` with a no-op progress callback so both the
 * streaming and non-streaming paths share one implementation.
 *
 * @param prompt - The CHAT-variant prompt to execute.
 * @returns The final chat result from the AgentKit run.
 */
async callChatModel(prompt) {
    return this.callChatModelStream(prompt, () => { });
}
|
|
24462
|
+
/**
|
|
24463
|
+
* Calls OpenAI AgentKit with a chat prompt (streaming).
|
|
24464
|
+
*/
|
|
24465
|
+
async callChatModelStream(prompt, onProgress) {
|
|
24466
|
+
const { content, parameters, modelRequirements } = prompt;
|
|
24467
|
+
if (modelRequirements.modelVariant !== 'CHAT') {
|
|
24468
|
+
throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
|
|
24469
|
+
}
|
|
24470
|
+
for (const key of ['maxTokens', 'modelName', 'seed', 'temperature']) {
|
|
24471
|
+
if (modelRequirements[key] !== undefined) {
|
|
24472
|
+
throw new NotYetImplementedError(`In \`OpenAiAgentKitExecutionTools\` you cannot specify \`${key}\``);
|
|
24473
|
+
}
|
|
24474
|
+
}
|
|
24475
|
+
const rawPromptContent = templateParameters(content, {
|
|
24476
|
+
...parameters,
|
|
24477
|
+
modelName: this.agentKitModelName,
|
|
24478
|
+
});
|
|
24479
|
+
const responseFormatOutputType = mapResponseFormatToAgentOutputType(modelRequirements.responseFormat);
|
|
24480
|
+
const preparedAgentKitAgent = await this.prepareAgentKitAgent({
|
|
24481
|
+
name: (prompt.title || 'Agent'),
|
|
24482
|
+
instructions: modelRequirements.systemMessage || '',
|
|
24483
|
+
knowledgeSources: modelRequirements.knowledgeSources,
|
|
24484
|
+
tools: 'tools' in prompt && Array.isArray(prompt.tools) ? prompt.tools : modelRequirements.tools,
|
|
24485
|
+
});
|
|
24486
|
+
return this.callChatModelStreamWithPreparedAgent({
|
|
24487
|
+
openAiAgentKitAgent: preparedAgentKitAgent.agent,
|
|
24488
|
+
prompt,
|
|
24489
|
+
rawPromptContent,
|
|
24490
|
+
onProgress,
|
|
24491
|
+
responseFormatOutputType,
|
|
24492
|
+
});
|
|
24493
|
+
}
|
|
24494
|
+
/**
 * Returns a prepared AgentKit agent when the server wants to manage caching externally.
 *
 * @returns The stored prepared agent, or `null` when none has been stored yet
 *   (the constructor initializes it to `null`).
 */
getPreparedAgentKitAgent() {
    return this.preparedAgentKitAgent;
}
|
|
24500
|
+
/**
 * Stores a prepared AgentKit agent for later reuse by external cache managers.
 *
 * @param preparedAgent - The `{ agent, vectorStoreId }` pair produced by `prepareAgentKitAgent`.
 */
setPreparedAgentKitAgent(preparedAgent) {
    this.preparedAgentKitAgent = preparedAgent;
}
|
|
24506
|
+
/**
 * Creates a new tools instance bound to a prepared AgentKit agent.
 *
 * The new instance is constructed from `this.agentKitOptions` — presumably the
 * original constructor options retained by the superclass; TODO confirm where
 * `agentKitOptions` is assigned — and seeded with the given prepared agent so
 * subsequent calls reuse it instead of preparing a fresh one.
 *
 * @param preparedAgent - The prepared agent to bind to the new instance.
 * @returns A fresh `OpenAiAgentKitExecutionTools` holding `preparedAgent`.
 */
getPreparedAgentTools(preparedAgent) {
    const tools = new OpenAiAgentKitExecutionTools(this.agentKitOptions);
    tools.setPreparedAgentKitAgent(preparedAgent);
    return tools;
}
|
|
24514
|
+
/**
 * Prepares an AgentKit agent with optional knowledge sources and tool definitions.
 *
 * When `vectorStoreId` is supplied (a cache hit) the knowledge-source upload is
 * skipped entirely; otherwise a new vector store is created and populated from
 * `knowledgeSources`. The resulting agent is optionally stored on this instance
 * (`storeAsPrepared`) so later calls can reuse it.
 *
 * @param options - `{ name, instructions, knowledgeSources, tools, vectorStoreId, storeAsPrepared }`
 * @returns `{ agent, vectorStoreId }` — the AgentKit agent plus the vector store backing its file search (if any).
 */
async prepareAgentKitAgent(options) {
    var _a, _b;
    const { name, instructions, knowledgeSources, tools, vectorStoreId: cachedVectorStoreId, storeAsPrepared, } = options;
    // Wire the AgentKit SDK to this instance's OpenAI client/key before building anything.
    await this.ensureAgentKitDefaults();
    if (this.options.isVerbose) {
        console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
            name,
            instructionsLength: instructions.length,
            knowledgeSourcesCount: (_a = knowledgeSources === null || knowledgeSources === void 0 ? void 0 : knowledgeSources.length) !== null && _a !== void 0 ? _a : 0,
            toolsCount: (_b = tools === null || tools === void 0 ? void 0 : tools.length) !== null && _b !== void 0 ? _b : 0,
        });
    }
    let vectorStoreId = cachedVectorStoreId;
    // Only create a vector store when none was cached AND there is something to upload.
    if (!vectorStoreId && knowledgeSources && knowledgeSources.length > 0) {
        const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
            client: await this.getClient(),
            name,
            knowledgeSources,
            logLabel: 'agentkit preparation',
        });
        vectorStoreId = vectorStoreResult.vectorStoreId;
    }
    else if (vectorStoreId && this.options.isVerbose) {
        console.info('[🤰]', 'Using cached vector store for AgentKit agent', {
            name,
            vectorStoreId,
        });
    }
    const agentKitTools = this.buildAgentKitTools({ tools, vectorStoreId });
    const openAiAgentKitAgent = new agents.Agent({
        name,
        model: this.agentKitModelName,
        // AgentKit requires non-empty instructions, hence the generic fallback.
        instructions: instructions || 'You are a helpful assistant.',
        tools: agentKitTools,
    });
    const preparedAgent = {
        agent: openAiAgentKitAgent,
        vectorStoreId,
    };
    if (storeAsPrepared) {
        this.setPreparedAgentKitAgent(preparedAgent);
    }
    if (this.options.isVerbose) {
        console.info('[🤰]', 'OpenAI AgentKit agent ready', {
            name,
            model: this.agentKitModelName,
            toolCount: agentKitTools.length,
            hasVectorStore: Boolean(vectorStoreId),
        });
    }
    return preparedAgent;
}
|
|
24569
|
+
/**
|
|
24570
|
+
* Ensures the AgentKit SDK is wired to the OpenAI client and API key.
|
|
24571
|
+
*/
|
|
24572
|
+
async ensureAgentKitDefaults() {
|
|
24573
|
+
const client = await this.getClient();
|
|
24574
|
+
agents.setDefaultOpenAIClient(client);
|
|
24575
|
+
const apiKey = this.agentKitOptions.apiKey;
|
|
24576
|
+
if (apiKey && typeof apiKey === 'string') {
|
|
24577
|
+
agents.setDefaultOpenAIKey(apiKey);
|
|
24578
|
+
}
|
|
24579
|
+
}
|
|
24580
|
+
/**
 * Builds the tool list for AgentKit, including hosted file search when applicable.
 *
 * Adds a hosted `fileSearchTool` when a vector store ID is present, then wraps
 * each supplied tool definition in an AgentKit `tool()` whose `execute` runs the
 * function through the first configured script tool (JavaScript).
 *
 * @param options - `{ tools, vectorStoreId }`
 * @returns Array of AgentKit tools (possibly empty).
 */
buildAgentKitTools(options) {
    var _a;
    const { tools, vectorStoreId } = options;
    const agentKitTools = [];
    if (vectorStoreId) {
        agentKitTools.push(agents.fileSearchTool(vectorStoreId));
    }
    if (tools && tools.length > 0) {
        const scriptTools = this.resolveScriptTools();
        for (const toolDefinition of tools) {
            agentKitTools.push(agents.tool({
                name: toolDefinition.name,
                description: toolDefinition.description,
                // AgentKit schemas are forced to closed objects; `required` defaults to [].
                parameters: toolDefinition.parameters
                    ? {
                        ...toolDefinition.parameters,
                        additionalProperties: false,
                        required: (_a = toolDefinition.parameters.required) !== null && _a !== void 0 ? _a : [],
                    }
                    : undefined,
                strict: false,
                execute: async (input, runContext, details) => {
                    var _a, _b, _c;
                    // NOTE(review): only the FIRST configured script tool is ever used — confirm this is intentional.
                    const scriptTool = scriptTools[0];
                    const functionName = toolDefinition.name;
                    const calledAt = $getCurrentDate();
                    const callId = (_a = details === null || details === void 0 ? void 0 : details.toolCall) === null || _a === void 0 ? void 0 : _a.callId;
                    const functionArgs = input !== null && input !== void 0 ? input : {};
                    if (this.options.isVerbose) {
                        console.info('[🤰]', 'Executing AgentKit tool', {
                            functionName,
                            callId,
                            calledAt,
                        });
                    }
                    try {
                        // NOTE(review): `functionName` and the JSON-serialized args are interpolated
                        // directly into generated JavaScript; a tool name containing code would be
                        // executed as-is. Flagging rather than changing — verify tool names are trusted.
                        return await scriptTool.execute({
                            scriptLanguage: 'javascript',
                            script: `
                                const args = ${JSON.stringify(functionArgs)};
                                return await ${functionName}(args);
                            `,
                            parameters: (_c = (_b = runContext === null || runContext === void 0 ? void 0 : runContext.context) === null || _b === void 0 ? void 0 : _b.parameters) !== null && _c !== void 0 ? _c : {},
                        });
                    }
                    catch (error) {
                        // Tool failures are reported back to the model as text so the run
                        // can continue, rather than aborting the whole agent run.
                        assertsError(error);
                        const serializedError = serializeError(error);
                        const errorMessage = spaceTrim__default["default"]((block) => `

                            The invoked tool \`${functionName}\` failed with error:

                            \`\`\`json
                            ${block(JSON.stringify(serializedError, null, 4))}
                            \`\`\`

                        `);
                        console.error('[🤰]', 'AgentKit tool execution failed', {
                            functionName,
                            callId,
                            error: serializedError,
                        });
                        return errorMessage;
                    }
                },
            }));
        }
    }
    return agentKitTools;
}
|
|
24653
|
+
/**
|
|
24654
|
+
* Resolves the configured script tools for tool execution.
|
|
24655
|
+
*/
|
|
24656
|
+
resolveScriptTools() {
|
|
24657
|
+
const executionTools = this.options.executionTools;
|
|
24658
|
+
if (!executionTools || !executionTools.script) {
|
|
24659
|
+
throw new PipelineExecutionError(`Model requested tools but no executionTools.script were provided in OpenAiAgentKitExecutionTools options`);
|
|
23381
24660
|
}
|
|
24661
|
+
return Array.isArray(executionTools.script) ? executionTools.script : [executionTools.script];
|
|
24662
|
+
}
|
|
24663
|
+
/**
 * Runs a prepared AgentKit agent and streams results back to the caller.
 *
 * Consumes the AgentKit event stream: text deltas are accumulated into
 * `latestContent` and forwarded via `onProgress`; `tool_called` / `tool_output`
 * events are tracked in `toolCalls` (matched by `callId`) and also forwarded.
 * After the stream completes, the final result (including any tool calls) is
 * sent to `onProgress` once more and returned.
 *
 * @param options - `{ openAiAgentKitAgent, prompt, onProgress, rawPromptContent?, responseFormatOutputType? }`
 * @returns The final chat result with timing, usage and collected tool calls.
 */
async callChatModelStreamWithPreparedAgent(options) {
    var _a, _b, _c, _d;
    const { openAiAgentKitAgent, prompt, onProgress } = options;
    // Template the prompt content unless the caller already did so.
    const rawPromptContent = (_a = options.rawPromptContent) !== null && _a !== void 0 ? _a : templateParameters(prompt.content, {
        ...prompt.parameters,
        modelName: this.agentKitModelName,
    });
    // A structured-output preference requires a clone so the shared prepared
    // agent instance is never mutated.
    const agentForRun = options.responseFormatOutputType !== undefined
        ? openAiAgentKitAgent.clone({
            outputType: options.responseFormatOutputType,
        })
        : openAiAgentKitAgent;
    const start = $getCurrentDate();
    let latestContent = '';
    const toolCalls = [];
    // Maps AgentKit callIds to indices in `toolCalls` so outputs can be matched
    // back to the call that produced them.
    const toolCallIndexById = new Map();
    const inputItems = await this.buildAgentKitInputItems(prompt, rawPromptContent);
    const rawRequest = {
        agentName: agentForRun.name,
        input: inputItems,
    };
    const streamResult = await agents.run(agentForRun, inputItems, {
        stream: true,
        context: { parameters: prompt.parameters },
    });
    for await (const event of streamResult) {
        // Incremental text from the model: append and report progress.
        if (event.type === 'raw_model_stream_event' && ((_b = event.data) === null || _b === void 0 ? void 0 : _b.type) === 'output_text_delta') {
            latestContent += event.data.delta;
            onProgress({
                content: latestContent,
                modelName: this.agentKitModelName,
                timing: { start, complete: $getCurrentDate() },
                usage: UNCERTAIN_USAGE,
                rawPromptContent: rawPromptContent,
                // Progress events deliberately carry no request/response payloads;
                // only the final result does.
                rawRequest: null,
                rawResponse: {},
            });
            continue;
        }
        if (event.type === 'run_item_stream_event') {
            const rawItem = (_c = event.item) === null || _c === void 0 ? void 0 : _c.rawItem;
            // A tool was invoked: record the call and surface it to the caller.
            if (event.name === 'tool_called' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call') {
                const toolCall = {
                    name: rawItem.name,
                    arguments: rawItem.arguments,
                    rawToolCall: rawItem,
                    createdAt: $getCurrentDate(),
                };
                toolCallIndexById.set(rawItem.callId, toolCalls.length);
                toolCalls.push(toolCall);
                onProgress({
                    content: latestContent,
                    modelName: this.agentKitModelName,
                    timing: { start, complete: $getCurrentDate() },
                    usage: UNCERTAIN_USAGE,
                    rawPromptContent: rawPromptContent,
                    rawRequest: null,
                    rawResponse: {},
                    toolCalls: [toolCall],
                });
            }
            // A tool finished: merge its result into the matching recorded call.
            if (event.name === 'tool_output' && (rawItem === null || rawItem === void 0 ? void 0 : rawItem.type) === 'function_call_result') {
                const index = toolCallIndexById.get(rawItem.callId);
                const result = this.formatAgentKitToolOutput(rawItem.output);
                // Outputs with no matching recorded call are silently dropped.
                if (index !== undefined) {
                    const existingToolCall = toolCalls[index];
                    const completedToolCall = {
                        ...existingToolCall,
                        result,
                        rawToolCall: rawItem,
                    };
                    toolCalls[index] = completedToolCall;
                    onProgress({
                        content: latestContent,
                        modelName: this.agentKitModelName,
                        timing: { start, complete: $getCurrentDate() },
                        usage: UNCERTAIN_USAGE,
                        rawPromptContent: rawPromptContent,
                        rawRequest: null,
                        rawResponse: {},
                        toolCalls: [completedToolCall],
                    });
                }
            }
        }
    }
    // Wait for the run to fully settle before reading `finalOutput`.
    await streamResult.completed;
    const complete = $getCurrentDate();
    const finalContent = ((_d = streamResult.finalOutput) !== null && _d !== void 0 ? _d : latestContent);
    const finalResult = {
        content: finalContent,
        modelName: this.agentKitModelName,
        timing: { start, complete },
        usage: UNCERTAIN_USAGE,
        rawPromptContent: rawPromptContent,
        rawRequest,
        rawResponse: { runResult: streamResult },
        toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
    };
    onProgress(finalResult);
    return finalResult;
}
|
|
23452
24768
|
/**
|
|
23453
|
-
*
|
|
24769
|
+
* Builds AgentKit input items from the prompt and optional thread.
|
|
23454
24770
|
*/
|
|
23455
|
-
|
|
23456
|
-
|
|
23457
|
-
const
|
|
23458
|
-
|
|
23459
|
-
|
|
23460
|
-
|
|
23461
|
-
|
|
23462
|
-
|
|
23463
|
-
|
|
23464
|
-
|
|
23465
|
-
|
|
23466
|
-
|
|
23467
|
-
|
|
23468
|
-
const response = await fetch(source);
|
|
23469
|
-
if (!response.ok) {
|
|
23470
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
23471
|
-
continue;
|
|
23472
|
-
}
|
|
23473
|
-
const buffer = await response.arrayBuffer();
|
|
23474
|
-
const filename = source.split('/').pop() || 'downloaded-file';
|
|
23475
|
-
const blob = new Blob([buffer]);
|
|
23476
|
-
const file = new File([blob], filename);
|
|
23477
|
-
fileStreams.push(file);
|
|
24771
|
+
async buildAgentKitInputItems(prompt, rawPromptContent) {
|
|
24772
|
+
var _a;
|
|
24773
|
+
const inputItems = [];
|
|
24774
|
+
if ('thread' in prompt && Array.isArray(prompt.thread)) {
|
|
24775
|
+
for (const message of prompt.thread) {
|
|
24776
|
+
const sender = message.sender;
|
|
24777
|
+
const content = (_a = message.content) !== null && _a !== void 0 ? _a : '';
|
|
24778
|
+
if (sender === 'assistant' || sender === 'agent') {
|
|
24779
|
+
inputItems.push({
|
|
24780
|
+
role: 'assistant',
|
|
24781
|
+
status: 'completed',
|
|
24782
|
+
content: [{ type: 'output_text', text: content }],
|
|
24783
|
+
});
|
|
23478
24784
|
}
|
|
23479
24785
|
else {
|
|
23480
|
-
|
|
24786
|
+
inputItems.push({
|
|
24787
|
+
role: 'user',
|
|
24788
|
+
content,
|
|
24789
|
+
});
|
|
23481
24790
|
}
|
|
23482
24791
|
}
|
|
23483
|
-
catch (error) {
|
|
23484
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
23485
|
-
}
|
|
23486
24792
|
}
|
|
23487
|
-
|
|
23488
|
-
|
|
23489
|
-
|
|
23490
|
-
|
|
23491
|
-
|
|
23492
|
-
|
|
23493
|
-
|
|
23494
|
-
|
|
23495
|
-
|
|
24793
|
+
const userContent = await this.buildAgentKitUserContent(prompt, rawPromptContent);
|
|
24794
|
+
inputItems.push({
|
|
24795
|
+
role: 'user',
|
|
24796
|
+
content: userContent,
|
|
24797
|
+
});
|
|
24798
|
+
return inputItems;
|
|
24799
|
+
}
|
|
24800
|
+
/**
|
|
24801
|
+
* Builds the user message content for AgentKit runs, including file inputs when provided.
|
|
24802
|
+
*/
|
|
24803
|
+
async buildAgentKitUserContent(prompt, rawPromptContent) {
|
|
24804
|
+
if ('files' in prompt && Array.isArray(prompt.files) && prompt.files.length > 0) {
|
|
24805
|
+
const fileItems = await Promise.all(prompt.files.map(async (file) => {
|
|
24806
|
+
const arrayBuffer = await file.arrayBuffer();
|
|
24807
|
+
const base64 = Buffer.from(arrayBuffer).toString('base64');
|
|
24808
|
+
return {
|
|
24809
|
+
type: 'input_image',
|
|
24810
|
+
image: `data:${file.type};base64,${base64}`,
|
|
24811
|
+
};
|
|
24812
|
+
}));
|
|
24813
|
+
return [{ type: 'input_text', text: rawPromptContent }, ...fileItems];
|
|
24814
|
+
}
|
|
24815
|
+
return rawPromptContent;
|
|
24816
|
+
}
|
|
24817
|
+
/**
|
|
24818
|
+
* Normalizes AgentKit tool outputs into a string for Promptbook tool call results.
|
|
24819
|
+
*/
|
|
24820
|
+
formatAgentKitToolOutput(output) {
|
|
24821
|
+
if (typeof output === 'string') {
|
|
24822
|
+
return output;
|
|
24823
|
+
}
|
|
24824
|
+
if (output && typeof output === 'object') {
|
|
24825
|
+
const textOutput = output;
|
|
24826
|
+
if (textOutput.type === 'text' && typeof textOutput.text === 'string') {
|
|
24827
|
+
return textOutput.text;
|
|
23496
24828
|
}
|
|
23497
24829
|
}
|
|
23498
|
-
return
|
|
24830
|
+
return JSON.stringify(output !== null && output !== void 0 ? output : null);
|
|
23499
24831
|
}
|
|
23500
24832
|
/**
|
|
23501
|
-
*
|
|
24833
|
+
* Returns AgentKit-specific options.
|
|
24834
|
+
*/
|
|
24835
|
+
get agentKitOptions() {
|
|
24836
|
+
return this.options;
|
|
24837
|
+
}
|
|
24838
|
+
/**
|
|
24839
|
+
* Discriminant for type guards.
|
|
23502
24840
|
*/
|
|
23503
24841
|
get discriminant() {
|
|
23504
|
-
return
|
|
24842
|
+
return DISCRIMINANT$1;
|
|
23505
24843
|
}
|
|
23506
24844
|
/**
|
|
23507
|
-
* Type guard to check if given `LlmExecutionTools` are instanceof `
|
|
24845
|
+
* Type guard to check if given `LlmExecutionTools` are instanceof `OpenAiAgentKitExecutionTools`.
|
|
23508
24846
|
*/
|
|
23509
|
-
static
|
|
23510
|
-
return llmExecutionTools.discriminant ===
|
|
24847
|
+
static isOpenAiAgentKitExecutionTools(llmExecutionTools) {
|
|
24848
|
+
return llmExecutionTools.discriminant === DISCRIMINANT$1;
|
|
23511
24849
|
}
|
|
23512
24850
|
}
|
|
24851
|
+
/**
|
|
24852
|
+
* Discriminant for type guards.
|
|
24853
|
+
*
|
|
24854
|
+
* @private const of `OpenAiAgentKitExecutionTools`
|
|
24855
|
+
*/
|
|
24856
|
+
const DISCRIMINANT$1 = 'OPEN_AI_AGENT_KIT_V1';
|
|
23513
24857
|
|
|
23514
24858
|
/**
|
|
23515
24859
|
* Uploads files to OpenAI and returns their IDs
|
|
@@ -23544,10 +24888,10 @@
|
|
|
23544
24888
|
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities, recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
23545
24889
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
23546
24890
|
*
|
|
24891
|
+
* @deprecated Use `OpenAiAgentKitExecutionTools` instead.
|
|
23547
24892
|
* @public exported from `@promptbook/openai`
|
|
23548
|
-
* @deprecated Use `OpenAiAgentExecutionTools` instead which uses the new OpenAI Responses API
|
|
23549
24893
|
*/
|
|
23550
|
-
class OpenAiAssistantExecutionTools extends
|
|
24894
|
+
class OpenAiAssistantExecutionTools extends OpenAiVectorStoreHandler {
|
|
23551
24895
|
/**
|
|
23552
24896
|
* Creates OpenAI Execution Tools.
|
|
23553
24897
|
*
|
|
@@ -23676,8 +25020,7 @@
|
|
|
23676
25020
|
console.info(colors__default["default"].bgWhite('rawRequest (non-streaming with tools)'), JSON.stringify(rawRequest, null, 4));
|
|
23677
25021
|
}
|
|
23678
25022
|
// Create thread and run
|
|
23679
|
-
|
|
23680
|
-
let run = threadAndRun;
|
|
25023
|
+
let run = (await client.beta.threads.createAndRun(rawRequest));
|
|
23681
25024
|
const completedToolCalls = [];
|
|
23682
25025
|
const toolCallStartedAt = new Map();
|
|
23683
25026
|
// Poll until run completes or requires action
|
|
@@ -23772,14 +25115,14 @@
|
|
|
23772
25115
|
}
|
|
23773
25116
|
}
|
|
23774
25117
|
// Submit tool outputs
|
|
23775
|
-
run = await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
|
|
25118
|
+
run = (await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
|
|
23776
25119
|
tool_outputs: toolOutputs,
|
|
23777
|
-
});
|
|
25120
|
+
}));
|
|
23778
25121
|
}
|
|
23779
25122
|
else {
|
|
23780
25123
|
// Wait a bit before polling again
|
|
23781
25124
|
await new Promise((resolve) => setTimeout(resolve, 500));
|
|
23782
|
-
run = await client.beta.threads.runs.retrieve(run.thread_id, run.id);
|
|
25125
|
+
run = (await client.beta.threads.runs.retrieve(run.thread_id, run.id));
|
|
23783
25126
|
}
|
|
23784
25127
|
}
|
|
23785
25128
|
if (run.status !== 'completed') {
|
|
@@ -23978,6 +25321,7 @@
|
|
|
23978
25321
|
getAssistant(assistantId) {
|
|
23979
25322
|
return new OpenAiAssistantExecutionTools({
|
|
23980
25323
|
...this.options,
|
|
25324
|
+
isCreatingNewAssistantsAllowed: this.isCreatingNewAssistantsAllowed,
|
|
23981
25325
|
assistantId,
|
|
23982
25326
|
});
|
|
23983
25327
|
}
|
|
@@ -24003,88 +25347,13 @@
|
|
|
24003
25347
|
let vectorStoreId;
|
|
24004
25348
|
// If knowledge sources are provided, create a vector store with them
|
|
24005
25349
|
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
24006
|
-
|
|
24007
|
-
|
|
24008
|
-
|
|
24009
|
-
|
|
24010
|
-
|
|
24011
|
-
}
|
|
24012
|
-
// Create a vector store
|
|
24013
|
-
const vectorStore = await client.beta.vectorStores.create({
|
|
24014
|
-
name: `${name} Knowledge Base`,
|
|
25350
|
+
const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
|
|
25351
|
+
client,
|
|
25352
|
+
name,
|
|
25353
|
+
knowledgeSources,
|
|
25354
|
+
logLabel: 'assistant creation',
|
|
24015
25355
|
});
|
|
24016
|
-
vectorStoreId =
|
|
24017
|
-
if (this.options.isVerbose) {
|
|
24018
|
-
console.info('[🤰]', 'Vector store created', {
|
|
24019
|
-
vectorStoreId,
|
|
24020
|
-
});
|
|
24021
|
-
}
|
|
24022
|
-
// Upload files from knowledge sources to the vector store
|
|
24023
|
-
const fileStreams = [];
|
|
24024
|
-
for (const [index, source] of knowledgeSources.entries()) {
|
|
24025
|
-
try {
|
|
24026
|
-
if (this.options.isVerbose) {
|
|
24027
|
-
console.info('[🤰]', 'Processing knowledge source', {
|
|
24028
|
-
index: index + 1,
|
|
24029
|
-
total: knowledgeSources.length,
|
|
24030
|
-
source,
|
|
24031
|
-
sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
|
|
24032
|
-
});
|
|
24033
|
-
}
|
|
24034
|
-
// Check if it's a URL
|
|
24035
|
-
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
24036
|
-
// Download the file
|
|
24037
|
-
const response = await fetch(source);
|
|
24038
|
-
if (!response.ok) {
|
|
24039
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
24040
|
-
continue;
|
|
24041
|
-
}
|
|
24042
|
-
const buffer = await response.arrayBuffer();
|
|
24043
|
-
let filename = source.split('/').pop() || 'downloaded-file';
|
|
24044
|
-
try {
|
|
24045
|
-
const url = new URL(source);
|
|
24046
|
-
filename = url.pathname.split('/').pop() || filename;
|
|
24047
|
-
}
|
|
24048
|
-
catch (error) {
|
|
24049
|
-
// Keep default filename
|
|
24050
|
-
}
|
|
24051
|
-
const blob = new Blob([buffer]);
|
|
24052
|
-
const file = new File([blob], filename);
|
|
24053
|
-
fileStreams.push(file);
|
|
24054
|
-
}
|
|
24055
|
-
else {
|
|
24056
|
-
/*
|
|
24057
|
-
TODO: [🐱🚀] Resolve problem with browser environment
|
|
24058
|
-
// Assume it's a local file path
|
|
24059
|
-
// Note: This will work in Node.js environment
|
|
24060
|
-
// For browser environments, this would need different handling
|
|
24061
|
-
const fs = await import('fs');
|
|
24062
|
-
const fileStream = fs.createReadStream(source);
|
|
24063
|
-
fileStreams.push(fileStream);
|
|
24064
|
-
*/
|
|
24065
|
-
}
|
|
24066
|
-
}
|
|
24067
|
-
catch (error) {
|
|
24068
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
24069
|
-
}
|
|
24070
|
-
}
|
|
24071
|
-
// Batch upload files to the vector store
|
|
24072
|
-
if (fileStreams.length > 0) {
|
|
24073
|
-
try {
|
|
24074
|
-
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
24075
|
-
files: fileStreams,
|
|
24076
|
-
});
|
|
24077
|
-
if (this.options.isVerbose) {
|
|
24078
|
-
console.info('[🤰]', 'Uploaded files to vector store', {
|
|
24079
|
-
vectorStoreId,
|
|
24080
|
-
fileCount: fileStreams.length,
|
|
24081
|
-
});
|
|
24082
|
-
}
|
|
24083
|
-
}
|
|
24084
|
-
catch (error) {
|
|
24085
|
-
console.error('Error uploading files to vector store:', error);
|
|
24086
|
-
}
|
|
24087
|
-
}
|
|
25356
|
+
vectorStoreId = vectorStoreResult.vectorStoreId;
|
|
24088
25357
|
}
|
|
24089
25358
|
// Create assistant with vector store attached
|
|
24090
25359
|
const assistantConfig = {
|
|
@@ -24151,91 +25420,14 @@
|
|
|
24151
25420
|
const client = await this.getClient();
|
|
24152
25421
|
let vectorStoreId;
|
|
24153
25422
|
// If knowledge sources are provided, create a vector store with them
|
|
24154
|
-
// TODO: [🧠] Reuse vector store creation logic from createNewAssistant
|
|
24155
25423
|
if (knowledgeSources && knowledgeSources.length > 0) {
|
|
24156
|
-
|
|
24157
|
-
|
|
24158
|
-
|
|
24159
|
-
|
|
24160
|
-
|
|
24161
|
-
});
|
|
24162
|
-
}
|
|
24163
|
-
// Create a vector store
|
|
24164
|
-
const vectorStore = await client.beta.vectorStores.create({
|
|
24165
|
-
name: `${name} Knowledge Base`,
|
|
25424
|
+
const vectorStoreResult = await this.createVectorStoreWithKnowledgeSources({
|
|
25425
|
+
client,
|
|
25426
|
+
name: name !== null && name !== void 0 ? name : assistantId,
|
|
25427
|
+
knowledgeSources,
|
|
25428
|
+
logLabel: 'assistant update',
|
|
24166
25429
|
});
|
|
24167
|
-
vectorStoreId =
|
|
24168
|
-
if (this.options.isVerbose) {
|
|
24169
|
-
console.info('[🤰]', 'Vector store created for assistant update', {
|
|
24170
|
-
vectorStoreId,
|
|
24171
|
-
});
|
|
24172
|
-
}
|
|
24173
|
-
// Upload files from knowledge sources to the vector store
|
|
24174
|
-
const fileStreams = [];
|
|
24175
|
-
for (const [index, source] of knowledgeSources.entries()) {
|
|
24176
|
-
try {
|
|
24177
|
-
if (this.options.isVerbose) {
|
|
24178
|
-
console.info('[🤰]', 'Processing knowledge source for update', {
|
|
24179
|
-
index: index + 1,
|
|
24180
|
-
total: knowledgeSources.length,
|
|
24181
|
-
source,
|
|
24182
|
-
sourceType: source.startsWith('http') || source.startsWith('https') ? 'url' : 'file',
|
|
24183
|
-
});
|
|
24184
|
-
}
|
|
24185
|
-
// Check if it's a URL
|
|
24186
|
-
if (source.startsWith('http://') || source.startsWith('https://')) {
|
|
24187
|
-
// Download the file
|
|
24188
|
-
const response = await fetch(source);
|
|
24189
|
-
if (!response.ok) {
|
|
24190
|
-
console.error(`Failed to download ${source}: ${response.statusText}`);
|
|
24191
|
-
continue;
|
|
24192
|
-
}
|
|
24193
|
-
const buffer = await response.arrayBuffer();
|
|
24194
|
-
let filename = source.split('/').pop() || 'downloaded-file';
|
|
24195
|
-
try {
|
|
24196
|
-
const url = new URL(source);
|
|
24197
|
-
filename = url.pathname.split('/').pop() || filename;
|
|
24198
|
-
}
|
|
24199
|
-
catch (error) {
|
|
24200
|
-
// Keep default filename
|
|
24201
|
-
}
|
|
24202
|
-
const blob = new Blob([buffer]);
|
|
24203
|
-
const file = new File([blob], filename);
|
|
24204
|
-
fileStreams.push(file);
|
|
24205
|
-
}
|
|
24206
|
-
else {
|
|
24207
|
-
/*
|
|
24208
|
-
TODO: [🐱🚀] Resolve problem with browser environment
|
|
24209
|
-
// Assume it's a local file path
|
|
24210
|
-
// Note: This will work in Node.js environment
|
|
24211
|
-
// For browser environments, this would need different handling
|
|
24212
|
-
const fs = await import('fs');
|
|
24213
|
-
const fileStream = fs.createReadStream(source);
|
|
24214
|
-
fileStreams.push(fileStream);
|
|
24215
|
-
*/
|
|
24216
|
-
}
|
|
24217
|
-
}
|
|
24218
|
-
catch (error) {
|
|
24219
|
-
console.error(`Error processing knowledge source ${source}:`, error);
|
|
24220
|
-
}
|
|
24221
|
-
}
|
|
24222
|
-
// Batch upload files to the vector store
|
|
24223
|
-
if (fileStreams.length > 0) {
|
|
24224
|
-
try {
|
|
24225
|
-
await client.beta.vectorStores.fileBatches.uploadAndPoll(vectorStoreId, {
|
|
24226
|
-
files: fileStreams,
|
|
24227
|
-
});
|
|
24228
|
-
if (this.options.isVerbose) {
|
|
24229
|
-
console.info('[🤰]', 'Uploaded files to vector store for update', {
|
|
24230
|
-
vectorStoreId,
|
|
24231
|
-
fileCount: fileStreams.length,
|
|
24232
|
-
});
|
|
24233
|
-
}
|
|
24234
|
-
}
|
|
24235
|
-
catch (error) {
|
|
24236
|
-
console.error('Error uploading files to vector store:', error);
|
|
24237
|
-
}
|
|
24238
|
-
}
|
|
25430
|
+
vectorStoreId = vectorStoreResult.vectorStoreId;
|
|
24239
25431
|
}
|
|
24240
25432
|
const assistantUpdate = {
|
|
24241
25433
|
name,
|
|
@@ -24339,8 +25531,8 @@
|
|
|
24339
25531
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
24340
25532
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
24341
25533
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
24342
|
-
* - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
24343
25534
|
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
25535
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
24344
25536
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
24345
25537
|
*
|
|
24346
25538
|
* @public exported from `@promptbook/core`
|
|
@@ -24475,97 +25667,129 @@
|
|
|
24475
25667
|
* Calls the chat model with agent-specific system prompt and requirements with streaming
|
|
24476
25668
|
*/
|
|
24477
25669
|
async callChatModelStream(prompt, onProgress) {
|
|
25670
|
+
var _a, _b;
|
|
24478
25671
|
// Ensure we're working with a chat prompt
|
|
24479
25672
|
if (prompt.modelRequirements.modelVariant !== 'CHAT') {
|
|
24480
25673
|
throw new Error('AgentLlmExecutionTools only supports chat prompts');
|
|
24481
25674
|
}
|
|
24482
25675
|
const modelRequirements = await this.getModelRequirements();
|
|
25676
|
+
const { _metadata, promptSuffix, ...sanitizedRequirements } = modelRequirements;
|
|
24483
25677
|
const chatPrompt = prompt;
|
|
24484
25678
|
let underlyingLlmResult;
|
|
24485
|
-
|
|
25679
|
+
const chatPromptContentWithSuffix = promptSuffix
|
|
25680
|
+
? `${chatPrompt.content}\n\n${promptSuffix}`
|
|
25681
|
+
: chatPrompt.content;
|
|
24486
25682
|
const promptWithAgentModelRequirements = {
|
|
24487
25683
|
...chatPrompt,
|
|
25684
|
+
content: chatPromptContentWithSuffix,
|
|
24488
25685
|
modelRequirements: {
|
|
24489
25686
|
...chatPrompt.modelRequirements,
|
|
24490
|
-
...
|
|
25687
|
+
...sanitizedRequirements,
|
|
24491
25688
|
// Spread tools to convert readonly array to mutable
|
|
24492
|
-
tools:
|
|
25689
|
+
tools: sanitizedRequirements.tools
|
|
25690
|
+
? [...sanitizedRequirements.tools]
|
|
25691
|
+
: chatPrompt.modelRequirements.tools,
|
|
24493
25692
|
// Spread knowledgeSources to convert readonly array to mutable
|
|
24494
|
-
knowledgeSources:
|
|
24495
|
-
? [...
|
|
25693
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources
|
|
25694
|
+
? [...sanitizedRequirements.knowledgeSources]
|
|
24496
25695
|
: undefined,
|
|
24497
25696
|
// Prepend agent system message to existing system message
|
|
24498
|
-
systemMessage:
|
|
25697
|
+
systemMessage: sanitizedRequirements.systemMessage +
|
|
24499
25698
|
(chatPrompt.modelRequirements.systemMessage
|
|
24500
25699
|
? `\n\n${chatPrompt.modelRequirements.systemMessage}`
|
|
24501
25700
|
: ''),
|
|
24502
25701
|
}, // Cast to avoid readonly mismatch from spread
|
|
24503
25702
|
};
|
|
24504
25703
|
console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
|
|
24505
|
-
if (
|
|
24506
|
-
const requirementsHash = cryptoJs.SHA256(JSON.stringify(
|
|
24507
|
-
const
|
|
24508
|
-
|
|
24509
|
-
|
|
25704
|
+
if (OpenAiAgentKitExecutionTools.isOpenAiAgentKitExecutionTools(this.options.llmTools)) {
|
|
25705
|
+
const requirementsHash = cryptoJs.SHA256(JSON.stringify(sanitizedRequirements)).toString();
|
|
25706
|
+
const vectorStoreHash = cryptoJs.SHA256(JSON.stringify((_a = sanitizedRequirements.knowledgeSources) !== null && _a !== void 0 ? _a : [])).toString();
|
|
25707
|
+
const cachedVectorStore = AgentLlmExecutionTools.vectorStoreCache.get(this.title);
|
|
25708
|
+
const cachedAgentKit = AgentLlmExecutionTools.agentKitAgentCache.get(this.title);
|
|
25709
|
+
let preparedAgentKit = this.options.assistantPreparationMode === 'external'
|
|
25710
|
+
? this.options.llmTools.getPreparedAgentKitAgent()
|
|
25711
|
+
: null;
|
|
25712
|
+
const vectorStoreId = (preparedAgentKit === null || preparedAgentKit === void 0 ? void 0 : preparedAgentKit.vectorStoreId) ||
|
|
25713
|
+
(cachedVectorStore && cachedVectorStore.requirementsHash === vectorStoreHash
|
|
25714
|
+
? cachedVectorStore.vectorStoreId
|
|
25715
|
+
: undefined);
|
|
25716
|
+
if (!preparedAgentKit && cachedAgentKit && cachedAgentKit.requirementsHash === requirementsHash) {
|
|
24510
25717
|
if (this.options.isVerbose) {
|
|
24511
|
-
console.
|
|
25718
|
+
console.info('[🤰]', 'Using cached OpenAI AgentKit agent', {
|
|
25719
|
+
agent: this.title,
|
|
25720
|
+
});
|
|
24512
25721
|
}
|
|
24513
|
-
|
|
24514
|
-
|
|
24515
|
-
|
|
24516
|
-
|
|
24517
|
-
// We can cast to access options if they were public, or use a method to clone.
|
|
24518
|
-
// OpenAiAgentExecutionTools doesn't have a clone method.
|
|
24519
|
-
// However, we can just assume the passed tool *might* not have the vector store yet, or we are replacing it.
|
|
24520
|
-
// Actually, if the passed tool IS OpenAiAgentExecutionTools, we should use it as a base.
|
|
24521
|
-
// TODO: [🧠] This is a bit hacky, accessing protected options or recreating tools.
|
|
24522
|
-
// Ideally OpenAiAgentExecutionTools should have a method `withVectorStoreId`.
|
|
24523
|
-
agentTools = new OpenAiAgentExecutionTools({
|
|
24524
|
-
...this.options.llmTools.options,
|
|
24525
|
-
vectorStoreId: cached.vectorStoreId,
|
|
24526
|
-
});
|
|
25722
|
+
preparedAgentKit = {
|
|
25723
|
+
agent: cachedAgentKit.agent,
|
|
25724
|
+
vectorStoreId: cachedAgentKit.vectorStoreId,
|
|
25725
|
+
};
|
|
24527
25726
|
}
|
|
24528
|
-
|
|
25727
|
+
if (!preparedAgentKit) {
|
|
24529
25728
|
if (this.options.isVerbose) {
|
|
24530
|
-
console.
|
|
24531
|
-
|
|
24532
|
-
|
|
24533
|
-
if (modelRequirements.knowledgeSources && modelRequirements.knowledgeSources.length > 0) {
|
|
24534
|
-
const client = await this.options.llmTools.getClient();
|
|
24535
|
-
vectorStoreId = await OpenAiAgentExecutionTools.createVectorStore(client, this.title, modelRequirements.knowledgeSources);
|
|
25729
|
+
console.info('[🤰]', 'Preparing OpenAI AgentKit agent', {
|
|
25730
|
+
agent: this.title,
|
|
25731
|
+
});
|
|
24536
25732
|
}
|
|
24537
|
-
if (vectorStoreId) {
|
|
24538
|
-
|
|
24539
|
-
|
|
24540
|
-
|
|
25733
|
+
if (!vectorStoreId && ((_b = sanitizedRequirements.knowledgeSources) === null || _b === void 0 ? void 0 : _b.length)) {
|
|
25734
|
+
emitAssistantPreparationProgress({
|
|
25735
|
+
onProgress,
|
|
25736
|
+
prompt,
|
|
25737
|
+
modelName: this.modelName,
|
|
25738
|
+
phase: 'Creating knowledge base',
|
|
24541
25739
|
});
|
|
24542
25740
|
}
|
|
24543
|
-
|
|
24544
|
-
|
|
25741
|
+
emitAssistantPreparationProgress({
|
|
25742
|
+
onProgress,
|
|
25743
|
+
prompt,
|
|
25744
|
+
modelName: this.modelName,
|
|
25745
|
+
phase: 'Preparing AgentKit agent',
|
|
25746
|
+
});
|
|
25747
|
+
preparedAgentKit = await this.options.llmTools.prepareAgentKitAgent({
|
|
25748
|
+
name: this.title,
|
|
25749
|
+
instructions: sanitizedRequirements.systemMessage || '',
|
|
25750
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
25751
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
24545
25752
|
vectorStoreId,
|
|
24546
25753
|
});
|
|
24547
25754
|
}
|
|
24548
|
-
|
|
24549
|
-
|
|
24550
|
-
|
|
24551
|
-
|
|
24552
|
-
|
|
24553
|
-
|
|
24554
|
-
|
|
24555
|
-
|
|
24556
|
-
|
|
24557
|
-
|
|
24558
|
-
|
|
24559
|
-
|
|
24560
|
-
|
|
24561
|
-
|
|
25755
|
+
if (preparedAgentKit.vectorStoreId) {
|
|
25756
|
+
AgentLlmExecutionTools.vectorStoreCache.set(this.title, {
|
|
25757
|
+
vectorStoreId: preparedAgentKit.vectorStoreId,
|
|
25758
|
+
requirementsHash: vectorStoreHash,
|
|
25759
|
+
});
|
|
25760
|
+
}
|
|
25761
|
+
AgentLlmExecutionTools.agentKitAgentCache.set(this.title, {
|
|
25762
|
+
agent: preparedAgentKit.agent,
|
|
25763
|
+
requirementsHash,
|
|
25764
|
+
vectorStoreId: preparedAgentKit.vectorStoreId,
|
|
25765
|
+
});
|
|
25766
|
+
const responseFormatOutputType = mapResponseFormatToAgentOutputType(promptWithAgentModelRequirements.modelRequirements.responseFormat);
|
|
25767
|
+
underlyingLlmResult = await this.options.llmTools.callChatModelStreamWithPreparedAgent({
|
|
25768
|
+
openAiAgentKitAgent: preparedAgentKit.agent,
|
|
25769
|
+
prompt: promptWithAgentModelRequirements,
|
|
25770
|
+
onProgress,
|
|
25771
|
+
responseFormatOutputType,
|
|
25772
|
+
});
|
|
24562
25773
|
}
|
|
24563
25774
|
else if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
|
|
24564
25775
|
// ... deprecated path ...
|
|
24565
|
-
const requirementsHash = cryptoJs.SHA256(JSON.stringify(
|
|
25776
|
+
const requirementsHash = cryptoJs.SHA256(JSON.stringify(sanitizedRequirements)).toString();
|
|
24566
25777
|
const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
|
|
24567
25778
|
let assistant;
|
|
24568
|
-
if (
|
|
25779
|
+
if (this.options.assistantPreparationMode === 'external') {
|
|
25780
|
+
assistant = this.options.llmTools;
|
|
25781
|
+
if (this.options.isVerbose) {
|
|
25782
|
+
console.info('[🤰]', 'Using externally managed OpenAI Assistant', {
|
|
25783
|
+
agent: this.title,
|
|
25784
|
+
assistantId: assistant.assistantId,
|
|
25785
|
+
});
|
|
25786
|
+
}
|
|
25787
|
+
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
25788
|
+
assistantId: assistant.assistantId,
|
|
25789
|
+
requirementsHash,
|
|
25790
|
+
});
|
|
25791
|
+
}
|
|
25792
|
+
else if (cached) {
|
|
24569
25793
|
if (cached.requirementsHash === requirementsHash) {
|
|
24570
25794
|
if (this.options.isVerbose) {
|
|
24571
25795
|
console.info('[🤰]', 'Using cached OpenAI Assistant', {
|
|
@@ -24591,9 +25815,9 @@
|
|
|
24591
25815
|
assistant = await this.options.llmTools.updateAssistant({
|
|
24592
25816
|
assistantId: cached.assistantId,
|
|
24593
25817
|
name: this.title,
|
|
24594
|
-
instructions:
|
|
24595
|
-
knowledgeSources:
|
|
24596
|
-
tools:
|
|
25818
|
+
instructions: sanitizedRequirements.systemMessage,
|
|
25819
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
25820
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
24597
25821
|
});
|
|
24598
25822
|
AgentLlmExecutionTools.assistantCache.set(this.title, {
|
|
24599
25823
|
assistantId: assistant.assistantId,
|
|
@@ -24616,9 +25840,9 @@
|
|
|
24616
25840
|
});
|
|
24617
25841
|
assistant = await this.options.llmTools.createNewAssistant({
|
|
24618
25842
|
name: this.title,
|
|
24619
|
-
instructions:
|
|
24620
|
-
knowledgeSources:
|
|
24621
|
-
tools:
|
|
25843
|
+
instructions: sanitizedRequirements.systemMessage,
|
|
25844
|
+
knowledgeSources: sanitizedRequirements.knowledgeSources,
|
|
25845
|
+
tools: sanitizedRequirements.tools ? [...sanitizedRequirements.tools] : undefined,
|
|
24622
25846
|
/*
|
|
24623
25847
|
!!!
|
|
24624
25848
|
metadata: {
|
|
@@ -24660,18 +25884,28 @@
|
|
|
24660
25884
|
}
|
|
24661
25885
|
}
|
|
24662
25886
|
let content = underlyingLlmResult.content;
|
|
24663
|
-
|
|
24664
|
-
|
|
24665
|
-
|
|
24666
|
-
|
|
25887
|
+
if (typeof content === 'string') {
|
|
25888
|
+
// Note: Cleanup the AI artifacts from the content
|
|
25889
|
+
content = humanizeAiText(content);
|
|
25890
|
+
// Note: Make sure the content is Promptbook-like
|
|
25891
|
+
content = promptbookifyAiText(content);
|
|
25892
|
+
}
|
|
25893
|
+
else {
|
|
25894
|
+
// TODO: Maybe deep `humanizeAiText` + `promptbookifyAiText` inside of the object
|
|
25895
|
+
content = JSON.stringify(content);
|
|
25896
|
+
}
|
|
24667
25897
|
const agentResult = {
|
|
24668
25898
|
...underlyingLlmResult,
|
|
24669
|
-
content,
|
|
25899
|
+
content: content,
|
|
24670
25900
|
modelName: this.modelName,
|
|
24671
25901
|
};
|
|
24672
25902
|
return agentResult;
|
|
24673
25903
|
}
|
|
24674
25904
|
}
|
|
25905
|
+
/**
|
|
25906
|
+
* Cached AgentKit agents to avoid rebuilding identical instances.
|
|
25907
|
+
*/
|
|
25908
|
+
AgentLlmExecutionTools.agentKitAgentCache = new Map();
|
|
24675
25909
|
/**
|
|
24676
25910
|
* Cache of OpenAI assistants to avoid creating duplicates
|
|
24677
25911
|
*/
|
|
@@ -24752,8 +25986,8 @@
|
|
|
24752
25986
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
24753
25987
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
24754
25988
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
24755
|
-
* - `OpenAiAgentExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with agent capabilities (using Responses API), recommended for usage in `Agent` or `AgentLlmExecutionTools`
|
|
24756
25989
|
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
25990
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
24757
25991
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
24758
25992
|
*
|
|
24759
25993
|
* @public exported from `@promptbook/core`
|
|
@@ -24784,6 +26018,7 @@
|
|
|
24784
26018
|
super({
|
|
24785
26019
|
isVerbose: options.isVerbose,
|
|
24786
26020
|
llmTools: getSingleLlmExecutionTools(options.executionTools.llm),
|
|
26021
|
+
assistantPreparationMode: options.assistantPreparationMode,
|
|
24787
26022
|
agentSource: agentSource.value, // <- TODO: [🐱🚀] Allow to pass BehaviorSubject<string_book> OR refresh llmExecutionTools.callChat on agentSource change
|
|
24788
26023
|
});
|
|
24789
26024
|
_Agent_instances.add(this);
|
|
@@ -24850,7 +26085,6 @@
|
|
|
24850
26085
|
* Note: This method also implements the learning mechanism
|
|
24851
26086
|
*/
|
|
24852
26087
|
async callChatModelStream(prompt, onProgress) {
|
|
24853
|
-
var _a;
|
|
24854
26088
|
// [1] Check if the user is asking the same thing as in the samples
|
|
24855
26089
|
const modelRequirements = await this.getModelRequirements();
|
|
24856
26090
|
if (modelRequirements.samples) {
|
|
@@ -24898,7 +26132,7 @@
|
|
|
24898
26132
|
if (result.rawResponse && 'sample' in result.rawResponse) {
|
|
24899
26133
|
return result;
|
|
24900
26134
|
}
|
|
24901
|
-
if (
|
|
26135
|
+
if (modelRequirements.isClosed) {
|
|
24902
26136
|
return result;
|
|
24903
26137
|
}
|
|
24904
26138
|
// Note: [0] Notify start of self-learning
|
|
@@ -25059,6 +26293,63 @@
|
|
|
25059
26293
|
* TODO: [🧠][😰]Agent is not working with the parameters, should it be?
|
|
25060
26294
|
*/
|
|
25061
26295
|
|
|
26296
|
+
/**
|
|
26297
|
+
* Resolve a remote META IMAGE value into an absolute URL when possible.
|
|
26298
|
+
*/
|
|
26299
|
+
function resolveRemoteImageUrl(imageUrl, agentUrl) {
|
|
26300
|
+
if (!imageUrl) {
|
|
26301
|
+
return undefined;
|
|
26302
|
+
}
|
|
26303
|
+
if (imageUrl.startsWith('http://') ||
|
|
26304
|
+
imageUrl.startsWith('https://') ||
|
|
26305
|
+
imageUrl.startsWith('data:') ||
|
|
26306
|
+
imageUrl.startsWith('blob:')) {
|
|
26307
|
+
return imageUrl;
|
|
26308
|
+
}
|
|
26309
|
+
try {
|
|
26310
|
+
return new URL(imageUrl, agentUrl).href;
|
|
26311
|
+
}
|
|
26312
|
+
catch (_a) {
|
|
26313
|
+
return imageUrl;
|
|
26314
|
+
}
|
|
26315
|
+
}
|
|
26316
|
+
/**
|
|
26317
|
+
* Format a META commitment line when the value is provided.
|
|
26318
|
+
*/
|
|
26319
|
+
function formatMetaLine(label, value) {
|
|
26320
|
+
if (!value) {
|
|
26321
|
+
return null;
|
|
26322
|
+
}
|
|
26323
|
+
return `META ${label} ${value}`;
|
|
26324
|
+
}
|
|
26325
|
+
/**
|
|
26326
|
+
* Build a minimal agent source snapshot for remote agents.
|
|
26327
|
+
*/
|
|
26328
|
+
function buildRemoteAgentSource(profile, meta) {
|
|
26329
|
+
const metaLines = [
|
|
26330
|
+
formatMetaLine('FULLNAME', meta === null || meta === void 0 ? void 0 : meta.fullname),
|
|
26331
|
+
formatMetaLine('IMAGE', meta === null || meta === void 0 ? void 0 : meta.image),
|
|
26332
|
+
formatMetaLine('DESCRIPTION', meta === null || meta === void 0 ? void 0 : meta.description),
|
|
26333
|
+
formatMetaLine('COLOR', meta === null || meta === void 0 ? void 0 : meta.color),
|
|
26334
|
+
formatMetaLine('FONT', meta === null || meta === void 0 ? void 0 : meta.font),
|
|
26335
|
+
formatMetaLine('LINK', meta === null || meta === void 0 ? void 0 : meta.link),
|
|
26336
|
+
]
|
|
26337
|
+
.filter((line) => Boolean(line))
|
|
26338
|
+
.join('\n');
|
|
26339
|
+
const personaBlock = profile.personaDescription
|
|
26340
|
+
? spaceTrim__default["default"]((block) => `
|
|
26341
|
+
PERSONA
|
|
26342
|
+
${block(profile.personaDescription || '')}
|
|
26343
|
+
`)
|
|
26344
|
+
: '';
|
|
26345
|
+
return book `
|
|
26346
|
+
${profile.agentName}
|
|
26347
|
+
|
|
26348
|
+
${metaLines}
|
|
26349
|
+
|
|
26350
|
+
${personaBlock}
|
|
26351
|
+
`;
|
|
26352
|
+
}
|
|
25062
26353
|
/**
|
|
25063
26354
|
* Represents one AI Agent
|
|
25064
26355
|
*
|
|
@@ -25066,13 +26357,15 @@
|
|
|
25066
26357
|
* - `Agent` - which represents an AI Agent with its source, memories, actions, etc. Agent is a higher-level abstraction which is internally using:
|
|
25067
26358
|
* - `LlmExecutionTools` - which wraps one or more LLM models and provides an interface to execute them
|
|
25068
26359
|
* - `AgentLlmExecutionTools` - which is a specific implementation of `LlmExecutionTools` that wraps another LlmExecutionTools and applies agent-specific system prompts and requirements
|
|
25069
|
-
* - `OpenAiAssistantExecutionTools` - which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
26360
|
+
* - `OpenAiAssistantExecutionTools` - (Deprecated) which is a specific implementation of `LlmExecutionTools` for OpenAI models with assistant capabilities
|
|
26361
|
+
* - `OpenAiAgentKitExecutionTools` - which is a specific implementation of `LlmExecutionTools` backed by OpenAI AgentKit
|
|
25070
26362
|
* - `RemoteAgent` - which is an `Agent` that connects to a Promptbook Agents Server
|
|
25071
26363
|
*
|
|
25072
26364
|
* @public exported from `@promptbook/core`
|
|
25073
26365
|
*/
|
|
25074
26366
|
class RemoteAgent extends Agent {
|
|
25075
26367
|
static async connect(options) {
|
|
26368
|
+
var _a, _b, _c;
|
|
25076
26369
|
const agentProfileUrl = `${options.agentUrl}/api/profile`;
|
|
25077
26370
|
const profileResponse = await fetch(agentProfileUrl);
|
|
25078
26371
|
// <- TODO: [🐱🚀] What about closed-source agents?
|
|
@@ -25092,14 +26385,14 @@
|
|
|
25092
26385
|
|
|
25093
26386
|
`));
|
|
25094
26387
|
}
|
|
25095
|
-
const profile = await profileResponse.json();
|
|
26388
|
+
const profile = (await profileResponse.json());
|
|
26389
|
+
const resolvedMeta = {
|
|
26390
|
+
...(profile.meta || {}),
|
|
26391
|
+
image: resolveRemoteImageUrl((_a = profile.meta) === null || _a === void 0 ? void 0 : _a.image, options.agentUrl),
|
|
26392
|
+
};
|
|
25096
26393
|
// Note: We are creating dummy agent source because we don't have the source from the remote agent
|
|
25097
26394
|
// But we populate the metadata from the profile
|
|
25098
|
-
const agentSource = new rxjs.BehaviorSubject(
|
|
25099
|
-
${profile.agentName}
|
|
25100
|
-
|
|
25101
|
-
${profile.personaDescription}
|
|
25102
|
-
`);
|
|
26395
|
+
const agentSource = new rxjs.BehaviorSubject(buildRemoteAgentSource(profile, resolvedMeta));
|
|
25103
26396
|
// <- TODO: [🐱🚀] createBookFromProfile
|
|
25104
26397
|
// <- TODO: [🐱🚀] Support updating and self-updating
|
|
25105
26398
|
const remoteAgent = new RemoteAgent({
|
|
@@ -25122,10 +26415,10 @@
|
|
|
25122
26415
|
});
|
|
25123
26416
|
remoteAgent._remoteAgentName = profile.agentName;
|
|
25124
26417
|
remoteAgent._remoteAgentHash = profile.agentHash;
|
|
25125
|
-
remoteAgent.personaDescription = profile.personaDescription;
|
|
25126
|
-
remoteAgent.initialMessage = profile.initialMessage;
|
|
25127
|
-
remoteAgent.links = profile.links;
|
|
25128
|
-
remoteAgent.meta =
|
|
26418
|
+
remoteAgent.personaDescription = (_b = profile.personaDescription) !== null && _b !== void 0 ? _b : null;
|
|
26419
|
+
remoteAgent.initialMessage = (_c = profile.initialMessage) !== null && _c !== void 0 ? _c : null;
|
|
26420
|
+
remoteAgent.links = profile.links || [];
|
|
26421
|
+
remoteAgent.meta = resolvedMeta;
|
|
25129
26422
|
remoteAgent.capabilities = profile.capabilities || [];
|
|
25130
26423
|
remoteAgent.samples = profile.samples || [];
|
|
25131
26424
|
remoteAgent.toolTitles = profile.toolTitles || {};
|
|
@@ -25229,26 +26522,7 @@
|
|
|
25229
26522
|
};
|
|
25230
26523
|
};
|
|
25231
26524
|
const getToolCallKey = (toolCall) => {
|
|
25232
|
-
|
|
25233
|
-
const rawId = (_a = toolCall.rawToolCall) === null || _a === void 0 ? void 0 : _a.id;
|
|
25234
|
-
if (rawId) {
|
|
25235
|
-
return `id:${rawId}`;
|
|
25236
|
-
}
|
|
25237
|
-
const argsKey = (() => {
|
|
25238
|
-
if (typeof toolCall.arguments === 'string') {
|
|
25239
|
-
return toolCall.arguments;
|
|
25240
|
-
}
|
|
25241
|
-
if (!toolCall.arguments) {
|
|
25242
|
-
return '';
|
|
25243
|
-
}
|
|
25244
|
-
try {
|
|
25245
|
-
return JSON.stringify(toolCall.arguments);
|
|
25246
|
-
}
|
|
25247
|
-
catch (_a) {
|
|
25248
|
-
return '';
|
|
25249
|
-
}
|
|
25250
|
-
})();
|
|
25251
|
-
return `${toolCall.name}:${toolCall.createdAt || ''}:${argsKey}`;
|
|
26525
|
+
return getToolCallIdentity(toolCall);
|
|
25252
26526
|
};
|
|
25253
26527
|
const mergeToolCall = (existing, incoming) => {
|
|
25254
26528
|
const incomingResult = incoming.result;
|