@promptbook/openai 0.101.0-9 → 0.102.0-0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +266 -177
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +30 -0
- package/esm/typings/src/_packages/core.index.d.ts +12 -0
- package/esm/typings/src/_packages/types.index.d.ts +12 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
- package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
- package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -14
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -14
- package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
- package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
- package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +5 -1
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
- package/esm/typings/src/book-components/Chat/utils/savePlugins.d.ts +55 -0
- package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
- package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
- package/esm/typings/src/execution/PromptResult.d.ts +2 -4
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
- package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
- package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
- package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
- package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
- package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
- package/esm/typings/src/personas/preparePersona.d.ts +1 -0
- package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
- package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
- package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
- package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +266 -177
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
- package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
- package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
- package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
- package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
- package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
- /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/umd/index.umd.js
CHANGED
@@ -25,7 +25,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-9';
+const PROMPTBOOK_ENGINE_VERSION = '0.102.0-0';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -872,76 +872,6 @@
     return replacedTemplates;
 }
 
-/**
- * Predefined profiles for LLM providers to maintain consistency across the application
- * These profiles represent each provider as a virtual persona in chat interfaces
- *
- * @private !!!!
- */
-const LLM_PROVIDER_PROFILES = {
-    OPENAI: {
-        name: 'OPENAI',
-        fullname: 'OpenAI GPT',
-        color: '#10a37f', // OpenAI's signature green
-        // Note: avatarSrc could be added when we have provider logos available
-    },
-    ANTHROPIC: {
-        name: 'ANTHROPIC',
-        fullname: 'Anthropic Claude',
-        color: '#d97706', // Anthropic's orange/amber color
-    },
-    AZURE_OPENAI: {
-        name: 'AZURE_OPENAI',
-        fullname: 'Azure OpenAI',
-        color: '#0078d4', // Microsoft Azure blue
-    },
-    GOOGLE: {
-        name: 'GOOGLE',
-        fullname: 'Google Gemini',
-        color: '#4285f4', // Google blue
-    },
-    DEEPSEEK: {
-        name: 'DEEPSEEK',
-        fullname: 'DeepSeek',
-        color: '#7c3aed', // Purple color for DeepSeek
-    },
-    OLLAMA: {
-        name: 'OLLAMA',
-        fullname: 'Ollama',
-        color: '#059669', // Emerald green for local models
-    },
-    REMOTE: {
-        name: 'REMOTE',
-        fullname: 'Remote Server',
-        color: '#6b7280', // Gray for remote/proxy connections
-    },
-    MOCKED_ECHO: {
-        name: 'MOCKED_ECHO',
-        fullname: 'Echo (Test)',
-        color: '#8b5cf6', // Purple for test/mock tools
-    },
-    MOCKED_FAKE: {
-        name: 'MOCKED_FAKE',
-        fullname: 'Fake LLM (Test)',
-        color: '#ec4899', // Pink for fake/test tools
-    },
-    VERCEL: {
-        name: 'VERCEL',
-        fullname: 'Vercel AI',
-        color: '#000000', // Vercel's black
-    },
-    MULTIPLE: {
-        name: 'MULTIPLE',
-        fullname: 'Multiple Providers',
-        color: '#6366f1', // Indigo for combined/multiple providers
-    },
-};
-/**
- * TODO: Refactor this - each profile must be alongside the provider definition
- * TODO: [ð] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
- * Note: [💞] Ignore a discrepancy between file name and entity name
- */
-
 /**
  * Counts number of characters in the text
  *
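The removed LLM_PROVIDER_PROFILES map is not dropped outright: later hunks in this diff re-introduce each profile as a constant colocated with its provider (see OPENAI_PROVIDER_PROFILE and REMOTE_PROVIDER_PROFILE below), resolving the "each profile must be alongside the provider definition" TODO. A minimal TypeScript sketch of the resulting pattern; the `ProviderProfile` type name and the class name are illustrative assumptions, not the package's exact typings:

    // Hypothetical shape; the real package derives this from its own typings
    type ProviderProfile = {
        name: string;
        fullname: string;
        color: string;
    };

    // Each provider module now owns its profile constant...
    const OPENAI_PROVIDER_PROFILE: ProviderProfile = {
        name: 'OPENAI',
        fullname: 'OpenAI GPT',
        color: '#10a37f',
    };

    // ...and exposes it through a `profile` getter instead of reading a shared map
    class OpenAiExecutionToolsSketch {
        public get profile(): ProviderProfile {
            return OPENAI_PROVIDER_PROFILE;
        }
    }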
@@ -1402,7 +1332,7 @@
         modelVariant: 'CHAT',
         modelTitle: 'gpt-5-mini',
         modelName: 'gpt-5-mini',
-        modelDescription:
+        modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
         pricing: {
             prompt: pricing(`$0.25 / 1M tokens`),
             output: pricing(`$2.00 / 1M tokens`),
@@ -1414,7 +1344,7 @@
         modelVariant: 'CHAT',
         modelTitle: 'gpt-5-nano',
         modelName: 'gpt-5-nano',
-        modelDescription:
+        modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
         pricing: {
             prompt: pricing(`$0.05 / 1M tokens`),
             output: pricing(`$0.40 / 1M tokens`),
@@ -1426,7 +1356,7 @@
         modelVariant: 'CHAT',
         modelTitle: 'gpt-4.1',
         modelName: 'gpt-4.1',
-        modelDescription:
+        modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
         pricing: {
             prompt: pricing(`$3.00 / 1M tokens`),
             output: pricing(`$12.00 / 1M tokens`),
@@ -1438,7 +1368,7 @@
         modelVariant: 'CHAT',
         modelTitle: 'gpt-4.1-mini',
         modelName: 'gpt-4.1-mini',
-        modelDescription:
+        modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
         pricing: {
             prompt: pricing(`$0.80 / 1M tokens`),
             output: pricing(`$3.20 / 1M tokens`),
@@ -1450,7 +1380,7 @@
         modelVariant: 'CHAT',
         modelTitle: 'gpt-4.1-nano',
         modelName: 'gpt-4.1-nano',
-        modelDescription:
+        modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
         pricing: {
             prompt: pricing(`$0.20 / 1M tokens`),
             output: pricing(`$0.80 / 1M tokens`),
@@ -1462,7 +1392,7 @@
         modelVariant: 'CHAT',
         modelTitle: 'o3',
         modelName: 'o3',
-        modelDescription:
+        modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
         pricing: {
             prompt: pricing(`$15.00 / 1M tokens`),
             output: pricing(`$60.00 / 1M tokens`),
@@ -1474,7 +1404,7 @@
         modelVariant: 'CHAT',
         modelTitle: 'o3-pro',
         modelName: 'o3-pro',
-        modelDescription:
+        modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
         pricing: {
             prompt: pricing(`$30.00 / 1M tokens`),
             output: pricing(`$120.00 / 1M tokens`),
@@ -1486,7 +1416,7 @@
         modelVariant: 'CHAT',
         modelTitle: 'o4-mini',
         modelName: 'o4-mini',
-        modelDescription:
+        modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
         pricing: {
             prompt: pricing(`$4.00 / 1M tokens`),
             output: pricing(`$16.00 / 1M tokens`),
@@ -1498,7 +1428,7 @@
         modelVariant: 'CHAT',
         modelTitle: 'o3-deep-research',
         modelName: 'o3-deep-research',
-        modelDescription:
+        modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
         pricing: {
             prompt: pricing(`$25.00 / 1M tokens`),
             output: pricing(`$100.00 / 1M tokens`),
@@ -1510,7 +1440,7 @@
         modelVariant: 'CHAT',
         modelTitle: 'o4-mini-deep-research',
         modelName: 'o4-mini-deep-research',
-        modelDescription:
+        modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
         pricing: {
             prompt: pricing(`$12.00 / 1M tokens`),
             output: pricing(`$48.00 / 1M tokens`),
@@ -2018,6 +1948,62 @@
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
 
+/**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+function parseUnsupportedParameterError(errorMessage) {
+    // Pattern to match "Unsupported value: 'parameter' does not support ..."
+    const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+    if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+        return unsupportedValueMatch[1];
+    }
+    // Pattern to match "'parameter' of type ... is not supported with this model"
+    const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+    if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+        return parameterTypeMatch[1];
+    }
+    return null;
+}
+/**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+    const newRequirements = { ...modelRequirements };
+    // Map of parameter names that might appear in error messages to ModelRequirements properties
+    const parameterMap = {
+        temperature: 'temperature',
+        max_tokens: 'maxTokens',
+        maxTokens: 'maxTokens',
+        seed: 'seed',
+    };
+    const propertyToRemove = parameterMap[unsupportedParameter];
+    if (propertyToRemove && propertyToRemove in newRequirements) {
+        delete newRequirements[propertyToRemove];
+    }
+    return newRequirements;
+}
+/**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+function isUnsupportedParameterError(error) {
+    const errorMessage = error.message.toLowerCase();
+    return (errorMessage.includes('unsupported value:') ||
+        errorMessage.includes('is not supported with this model') ||
+        errorMessage.includes('does not support'));
+}
+
 /**
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
  *
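The three helpers above cooperate: isUnsupportedParameterError() decides whether an OpenAI error is worth a retry, parseUnsupportedParameterError() extracts the offending parameter name, and removeUnsupportedModelRequirement() strips it from the requirements. A minimal TypeScript sketch of that pipeline; the error text is an invented example shaped to match the regexes above, not a captured API response:

    // Hypothetical error message matching the first regex pattern above
    const errorMessage = "Unsupported value: 'temperature' does not support 0.7 with this model.";

    // parseUnsupportedParameterError() would return 'temperature' here
    const unsupportedParameter =
        errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i)?.[1] ?? null;

    // removeUnsupportedModelRequirement() then produces a copy without that key
    const modelRequirements: Record<string, unknown> = { modelVariant: 'CHAT', temperature: 0.7, maxTokens: 1024 };
    if (unsupportedParameter !== null) {
        const { [unsupportedParameter]: _removed, ...retryRequirements } = modelRequirements;
        console.log(retryRequirements); // -> { modelVariant: 'CHAT', maxTokens: 1024 }
    }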
@@ -2035,6 +2021,10 @@
          * OpenAI API client.
          */
         this.client = null;
+        /**
+         * Tracks models and parameters that have already been retried to prevent infinite loops
+         */
+        this.retriedUnsupportedParameters = new Set();
         // TODO: Allow configuring rate limits via options
         this.limiter = new Bottleneck__default["default"]({
             minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
@@ -2096,21 +2086,27 @@
      * Calls OpenAI compatible API to use a chat model.
      */
     async callChatModel(prompt) {
+        return this.callChatModelWithRetry(prompt, prompt.modelRequirements);
+    }
+    /**
+     * Internal method that handles parameter retry for chat model calls
+     */
+    async callChatModelWithRetry(prompt, currentModelRequirements) {
         var _a;
         if (this.options.isVerbose) {
-            console.info(`💬 ${this.title} callChatModel call`, { prompt });
+            console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
         }
-        const { content, parameters,
+        const { content, parameters, format } = prompt;
         const client = await this.getClient();
         // TODO: [â] Use here more modelRequirements
-        if (
+        if (currentModelRequirements.modelVariant !== 'CHAT') {
             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
         }
-        const modelName =
+        const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens:
-            temperature:
+            max_tokens: currentModelRequirements.maxTokens,
+            temperature: currentModelRequirements.temperature,
             // <- TODO: [ð] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
         }; // <- TODO: [💩] Guard here types better
@@ -2125,12 +2121,12 @@
         const rawRequest = {
             ...modelSettings,
             messages: [
-                ...(
+                ...(currentModelRequirements.systemMessage === undefined
                     ? []
                     : [
                           {
                               role: 'system',
-                              content:
+                              content: currentModelRequirements.systemMessage,
                           },
                       ]),
                 {
@@ -2144,69 +2140,110 @@
         if (this.options.isVerbose) {
             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
-
-
-
-
+        try {
+            const rawResponse = await this.limiter
+                .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+                .catch((error) => {
+                assertsError(error);
+                if (this.options.isVerbose) {
+                    console.info(colors__default["default"].bgRed('error'), error);
+                }
+                throw error;
+            });
         if (this.options.isVerbose) {
-            console.info(colors__default["default"].
+            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
         }
-
-
-
-
-
-
-
-
-
-
-
-
+            const complete = $getCurrentDate();
+            if (!rawResponse.choices[0]) {
+                throw new PipelineExecutionError(`No choises from ${this.title}`);
+            }
+            if (rawResponse.choices.length > 1) {
+                // TODO: This should be maybe only warning
+                throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+            }
+            const resultContent = rawResponse.choices[0].message.content;
+            const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+            if (resultContent === null) {
+                throw new PipelineExecutionError(`No response message from ${this.title}`);
+            }
+            return exportJson({
+                name: 'promptResult',
+                message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+                order: [],
+                value: {
+                    content: resultContent,
+                    modelName: rawResponse.model || modelName,
+                    timing: {
+                        start,
+                        complete,
+                    },
+                    usage,
+                    rawPromptContent,
+                    rawRequest,
+                    rawResponse,
+                    // <- [🎯]
+                },
+            });
         }
-
-
-
-
+        catch (error) {
+            assertsError(error);
+            // Check if this is an unsupported parameter error
+            if (!isUnsupportedParameterError(error)) {
+                throw error;
+            }
+            // Parse which parameter is unsupported
+            const unsupportedParameter = parseUnsupportedParameterError(error.message);
+            if (!unsupportedParameter) {
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                }
+                throw error;
+            }
+            // Create a unique key for this model + parameter combination to prevent infinite loops
+            const retryKey = `${modelName}-${unsupportedParameter}`;
+            if (this.retriedUnsupportedParameters.has(retryKey)) {
+                // Already retried this parameter, throw the error
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                }
+                throw error;
+            }
+            // Mark this parameter as retried
+            this.retriedUnsupportedParameters.add(retryKey);
+            // Log warning in verbose mode
+            if (this.options.isVerbose) {
+                console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+            }
+            // Remove the unsupported parameter and retry
+            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+            return this.callChatModelWithRetry(prompt, modifiedModelRequirements);
         }
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: {
-                    start,
-                    complete,
-                },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-                // <- [🎯]
-            },
-        });
     }
     /**
      * Calls OpenAI API to use a complete model.
      */
     async callCompletionModel(prompt) {
+        return this.callCompletionModelWithRetry(prompt, prompt.modelRequirements);
+    }
+    /**
+     * Internal method that handles parameter retry for completion model calls
+     */
+    async callCompletionModelWithRetry(prompt, currentModelRequirements) {
         var _a;
         if (this.options.isVerbose) {
-            console.info(`📖 ${this.title} callCompletionModel call`, { prompt });
+            console.info(`📖 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
         }
-        const { content, parameters
+        const { content, parameters } = prompt;
         const client = await this.getClient();
         // TODO: [â] Use here more modelRequirements
-        if (
+        if (currentModelRequirements.modelVariant !== 'COMPLETION') {
             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
         }
-        const modelName =
+        const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens:
-            temperature:
+            max_tokens: currentModelRequirements.maxTokens,
+            temperature: currentModelRequirements.temperature,
             // <- TODO: [ð] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
         };
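callChatModel() is now a thin wrapper over callChatModelWithRetry(), which retries at most once per model + parameter combination (tracked in the retriedUnsupportedParameters Set) before rethrowing. A condensed TypeScript sketch of that control flow under assumed names; callWithParameterRetry and callApi are illustrative stand-ins, not package functions:

    async function callWithParameterRetry(
        requirements: Record<string, unknown>,
        callApi: (requirements: Record<string, unknown>) => Promise<string>,
        retried = new Set<string>(), // mirrors `retriedUnsupportedParameters`
    ): Promise<string> {
        try {
            return await callApi(requirements);
        } catch (error) {
            if (!(error instanceof Error)) {
                throw error;
            }
            // Mirrors isUnsupportedParameterError() + parseUnsupportedParameterError()
            const parameter = /unsupported value:|does not support|is not supported with this model/i.test(error.message)
                ? error.message.match(/'([^']+)'/)?.[1]
                : undefined;
            const retryKey = `${requirements['modelName']}-${parameter}`;
            if (parameter === undefined || retried.has(retryKey)) {
                throw error; // Not parseable, or already retried once: give up
            }
            retried.add(retryKey);
            // Mirrors removeUnsupportedModelRequirement(): drop the parameter and retry
            const { [parameter]: _dropped, ...rest } = requirements;
            return callWithParameterRetry(rest, callApi, retried);
        }
    }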
@@ -2220,46 +2257,81 @@
         if (this.options.isVerbose) {
             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
-
-
-
-
+        try {
+            const rawResponse = await this.limiter
+                .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
+                .catch((error) => {
+                assertsError(error);
+                if (this.options.isVerbose) {
+                    console.info(colors__default["default"].bgRed('error'), error);
+                }
+                throw error;
+            });
         if (this.options.isVerbose) {
-            console.info(colors__default["default"].
+            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
         }
-
-
-
-
-
-
-
-
+            const complete = $getCurrentDate();
+            if (!rawResponse.choices[0]) {
+                throw new PipelineExecutionError(`No choises from ${this.title}`);
+            }
+            if (rawResponse.choices.length > 1) {
+                // TODO: This should be maybe only warning
+                throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+            }
+            const resultContent = rawResponse.choices[0].text;
+            const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+            return exportJson({
+                name: 'promptResult',
+                message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
+                order: [],
+                value: {
+                    content: resultContent,
+                    modelName: rawResponse.model || modelName,
+                    timing: {
+                        start,
+                        complete,
+                    },
+                    usage,
+                    rawPromptContent,
+                    rawRequest,
+                    rawResponse,
+                    // <- [🎯]
+                },
+            });
         }
-
-
-
+        catch (error) {
+            assertsError(error);
+            // Check if this is an unsupported parameter error
+            if (!isUnsupportedParameterError(error)) {
+                throw error;
+            }
+            // Parse which parameter is unsupported
+            const unsupportedParameter = parseUnsupportedParameterError(error.message);
+            if (!unsupportedParameter) {
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                }
+                throw error;
+            }
+            // Create a unique key for this model + parameter combination to prevent infinite loops
+            const retryKey = `${modelName}-${unsupportedParameter}`;
+            if (this.retriedUnsupportedParameters.has(retryKey)) {
+                // Already retried this parameter, throw the error
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                }
+                throw error;
+            }
+            // Mark this parameter as retried
+            this.retriedUnsupportedParameters.add(retryKey);
+            // Log warning in verbose mode
+            if (this.options.isVerbose) {
+                console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+            }
+            // Remove the unsupported parameter and retry
+            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+            return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements);
         }
-        const resultContent = rawResponse.choices[0].text;
-        const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: {
-                    start,
-                    complete,
-                },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-                // <- [🎯]
-            },
-        });
     }
     /**
      * Calls OpenAI compatible API to use a embedding model
@@ -2285,7 +2357,7 @@
             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
         const rawResponse = await this.limiter
-            .schedule(() => this.
+            .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
             .catch((error) => {
             assertsError(error);
             if (this.options.isVerbose) {
@@ -2347,7 +2419,7 @@
     /**
      * Makes a request with retry logic for network errors like ECONNRESET
      */
-    async
+    async makeRequestWithNetworkRetry(requestFn) {
         let lastError;
         for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
             try {
@@ -2359,8 +2431,8 @@
                 // Check if this is a retryable network error
                 const isRetryableError = this.isRetryableNetworkError(error);
                 if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
-                    if (this.options.isVerbose) {
-                        console.info(colors__default["default"].bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+                    if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
+                        console.info(colors__default["default"].bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
                     }
                     throw error;
                 }
@@ -2370,7 +2442,7 @@
                 const jitterDelay = Math.random() * 500; // Add some randomness
                 const totalDelay = backoffDelay + jitterDelay;
                 if (this.options.isVerbose) {
-                    console.info(colors__default["default"].bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+                    console.info(colors__default["default"].bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
                 }
                 // Wait before retrying
                 await new Promise((resolve) => setTimeout(resolve, totalDelay));
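The retry delay combines an exponential backoff term with up to 500 ms of random jitter; only the jitter line and the sum appear verbatim in this diff, so the exponential base and starting delay in the sketch below are assumptions for illustration:

    // Assumed shape of `backoffDelay`; the diff only shows the jitter and the sum
    function computeRetryDelay(attempt: number): number {
        const backoffDelay = 1000 * 2 ** (attempt - 1); // assumption: 1s, 2s, 4s, ...
        const jitterDelay = Math.random() * 500; // Add some randomness (verbatim from the diff)
        return backoffDelay + jitterDelay;
    }

    // attempt 1 -> ~1000-1500 ms, attempt 2 -> ~2000-2500 ms, attempt 3 -> ~4000-4500 ms
    for (let attempt = 1; attempt <= 3; attempt++) {
        console.log(`attempt ${attempt}: ~${Math.round(computeRetryDelay(attempt))} ms`);
    }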
@@ -2419,8 +2491,17 @@
  * TODO: [ð] Maybe make custom `OpenAiCompatibleError`
  * TODO: [🧠][ð] Maybe use `isDeterministic` from options
  * TODO: [🧠][💰] Allow to pass `title` for tracking purposes
+ * TODO: [🧠][🦢] Make reverse adapter from LlmExecutionTools to OpenAI-compatible:
  */
 
+/**
+ * Profile for OpenAI provider
+ */
+const OPENAI_PROVIDER_PROFILE = {
+    name: 'OPENAI',
+    fullname: 'OpenAI GPT',
+    color: '#10a37f',
+};
 /**
  * Execution Tools for calling OpenAI API
  *
@@ -2443,7 +2524,7 @@
         return 'Use all models provided by OpenAI';
     }
     get profile() {
-        return
+        return OPENAI_PROVIDER_PROFILE;
     }
     /*
         Note: Commenting this out to avoid circular dependency
@@ -3021,6 +3102,14 @@
     });
 }
 
+/**
+ * Profile for Remote provider
+ */
+const REMOTE_PROVIDER_PROFILE = {
+    name: 'REMOTE',
+    fullname: 'Remote Server',
+    color: '#6b7280',
+};
 /**
  * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
  *
@@ -3043,7 +3132,7 @@
         return `Models from Promptbook remote server ${this.options.remoteServerUrl}`;
     }
     get profile() {
-        return
+        return REMOTE_PROVIDER_PROFILE;
     }
     /**
      * Check the configuration of all execution tools
|