@promptbook/wizard 0.101.0-8 → 0.101.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +0 -4
- package/esm/index.es.js +423 -250
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +14 -0
- package/esm/typings/src/_packages/core.index.d.ts +12 -0
- package/esm/typings/src/_packages/types.index.d.ts +8 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
- package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
- package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +25 -10
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
- package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
- package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
- package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
- package/esm/typings/src/execution/PromptResult.d.ts +2 -4
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
- package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
- package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
- package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
- package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
- package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
- package/esm/typings/src/personas/preparePersona.d.ts +1 -0
- package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
- package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
- package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
- package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +423 -250
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
- package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
- package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
- package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
- package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
- package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
- /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/umd/index.umd.js
CHANGED
@@ -48,7 +48,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-8';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1349,76 +1349,6 @@
     return deserializedError;
 }
 
-/**
- * Predefined profiles for LLM providers to maintain consistency across the application
- * These profiles represent each provider as a virtual persona in chat interfaces
- *
- * @private !!!!
- */
-const LLM_PROVIDER_PROFILES = {
-    OPENAI: {
-        name: 'OPENAI',
-        fullname: 'OpenAI GPT',
-        color: '#10a37f', // OpenAI's signature green
-        // Note: avatarSrc could be added when we have provider logos available
-    },
-    ANTHROPIC: {
-        name: 'ANTHROPIC',
-        fullname: 'Anthropic Claude',
-        color: '#d97706', // Anthropic's orange/amber color
-    },
-    AZURE_OPENAI: {
-        name: 'AZURE_OPENAI',
-        fullname: 'Azure OpenAI',
-        color: '#0078d4', // Microsoft Azure blue
-    },
-    GOOGLE: {
-        name: 'GOOGLE',
-        fullname: 'Google Gemini',
-        color: '#4285f4', // Google blue
-    },
-    DEEPSEEK: {
-        name: 'DEEPSEEK',
-        fullname: 'DeepSeek',
-        color: '#7c3aed', // Purple color for DeepSeek
-    },
-    OLLAMA: {
-        name: 'OLLAMA',
-        fullname: 'Ollama',
-        color: '#059669', // Emerald green for local models
-    },
-    REMOTE: {
-        name: 'REMOTE',
-        fullname: 'Remote Server',
-        color: '#6b7280', // Gray for remote/proxy connections
-    },
-    MOCKED_ECHO: {
-        name: 'MOCKED_ECHO',
-        fullname: 'Echo (Test)',
-        color: '#8b5cf6', // Purple for test/mock tools
-    },
-    MOCKED_FAKE: {
-        name: 'MOCKED_FAKE',
-        fullname: 'Fake LLM (Test)',
-        color: '#ec4899', // Pink for fake/test tools
-    },
-    VERCEL: {
-        name: 'VERCEL',
-        fullname: 'Vercel AI',
-        color: '#000000', // Vercel's black
-    },
-    MULTIPLE: {
-        name: 'MULTIPLE',
-        fullname: 'Multiple Providers',
-        color: '#6366f1', // Indigo for combined/multiple providers
-    },
-};
-/**
- * TODO: Refactor this - each profile must be alongside the provider definition
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
- * Note: [💞] Ignore a discrepancy between file name and entity name
- */
-
 /**
  * Tests if given string is valid URL.
  *
@@ -1512,6 +1442,14 @@
 function keepUnused(...valuesToKeep) {
 }
 
+/**
+ * Profile for Remote provider
+ */
+const REMOTE_PROVIDER_PROFILE = {
+    name: 'REMOTE',
+    fullname: 'Remote Server',
+    color: '#6b7280',
+};
 /**
  * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
  *
@@ -1534,7 +1472,7 @@
         return `Models from Promptbook remote server ${this.options.remoteServerUrl}`;
     }
     get profile() {
-        return
+        return REMOTE_PROVIDER_PROFILE;
     }
     /**
      * Check the configuration of all execution tools
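The hunks above and below follow one pattern across this release: the central `LLM_PROVIDER_PROFILES` map is removed, and each provider keeps its own `*_PROVIDER_PROFILE` constant next to its execution tools and returns it from a `profile` getter. A minimal sketch of that shape, assuming a `ProviderProfile` type name (the package's actual exported type may differ):

    type ProviderProfile = {
        name: string; // machine-readable provider id, e.g. 'REMOTE'
        fullname: string; // human-readable label shown in chat interfaces
        color: string; // brand color used for the provider's avatar
    };

    const REMOTE_PROVIDER_PROFILE: ProviderProfile = {
        name: 'REMOTE',
        fullname: 'Remote Server',
        color: '#6b7280',
    };

    class RemoteLlmExecutionTools {
        // Each provider now owns its profile instead of reading the
        // removed central LLM_PROVIDER_PROFILES map.
        get profile(): ProviderProfile {
            return REMOTE_PROVIDER_PROFILE;
        }
    }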
@@ -2507,6 +2445,14 @@
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
 
+/**
+ * Profile for Anthropic Claude provider
+ */
+const ANTHROPIC_PROVIDER_PROFILE = {
+    name: 'ANTHROPIC',
+    fullname: 'Anthropic Claude',
+    color: '#d97706',
+};
 /**
  * Execution Tools for calling Anthropic Claude API.
  *
@@ -2535,7 +2481,7 @@
         return 'Use all models provided by Anthropic Claude';
     }
     get profile() {
-        return
+        return ANTHROPIC_PROVIDER_PROFILE;
     }
     async getClient() {
         if (this.client === null) {
@@ -2826,7 +2772,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'gpt-5-mini',
             modelName: 'gpt-5-mini',
-            modelDescription:
+            modelDescription: 'A faster, cost-efficient version of GPT-5 for well-defined tasks with 200K context window. Maintains core GPT-5 capabilities while offering 5x faster inference and significantly lower costs. Features enhanced instruction following and reduced latency for production applications requiring quick responses with high quality.',
             pricing: {
                 prompt: pricing(`$0.25 / 1M tokens`),
                 output: pricing(`$2.00 / 1M tokens`),
@@ -2838,7 +2784,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'gpt-5-nano',
             modelName: 'gpt-5-nano',
-            modelDescription:
+            modelDescription: 'The fastest, most cost-efficient version of GPT-5 with 200K context window. Optimized for summarization, classification, and simple reasoning tasks. Features 10x faster inference than base GPT-5 while maintaining good quality for straightforward applications. Ideal for high-volume, cost-sensitive deployments.',
             pricing: {
                 prompt: pricing(`$0.05 / 1M tokens`),
                 output: pricing(`$0.40 / 1M tokens`),
@@ -2850,7 +2796,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'gpt-4.1',
             modelName: 'gpt-4.1',
-            modelDescription:
+            modelDescription: 'Smartest non-reasoning model with 128K context window. Enhanced version of GPT-4 with improved instruction following, better factual accuracy, and reduced hallucinations. Features advanced function calling capabilities and superior performance on coding tasks. Ideal for applications requiring high intelligence without reasoning overhead.',
             pricing: {
                 prompt: pricing(`$3.00 / 1M tokens`),
                 output: pricing(`$12.00 / 1M tokens`),
@@ -2862,7 +2808,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'gpt-4.1-mini',
             modelName: 'gpt-4.1-mini',
-            modelDescription:
+            modelDescription: 'Smaller, faster version of GPT-4.1 with 128K context window. Balances intelligence and efficiency with 3x faster inference than base GPT-4.1. Maintains strong capabilities across text generation, reasoning, and coding while offering better cost-performance ratio for most applications.',
             pricing: {
                 prompt: pricing(`$0.80 / 1M tokens`),
                 output: pricing(`$3.20 / 1M tokens`),
@@ -2874,7 +2820,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'gpt-4.1-nano',
             modelName: 'gpt-4.1-nano',
-            modelDescription:
+            modelDescription: 'Fastest, most cost-efficient version of GPT-4.1 with 128K context window. Optimized for high-throughput applications requiring good quality at minimal cost. Features 5x faster inference than GPT-4.1 while maintaining adequate performance for most general-purpose tasks.',
             pricing: {
                 prompt: pricing(`$0.20 / 1M tokens`),
                 output: pricing(`$0.80 / 1M tokens`),
@@ -2886,7 +2832,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'o3',
             modelName: 'o3',
-            modelDescription:
+            modelDescription: 'Advanced reasoning model with 128K context window specializing in complex logical, mathematical, and analytical tasks. Successor to o1 with enhanced step-by-step problem-solving capabilities and superior performance on STEM-focused problems. Ideal for professional applications requiring deep analytical thinking and precise reasoning.',
             pricing: {
                 prompt: pricing(`$15.00 / 1M tokens`),
                 output: pricing(`$60.00 / 1M tokens`),
@@ -2898,7 +2844,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'o3-pro',
             modelName: 'o3-pro',
-            modelDescription:
+            modelDescription: 'Enhanced version of o3 with more compute allocated for better responses on the most challenging problems. Features extended reasoning time and improved accuracy on complex analytical tasks. Designed for applications where maximum reasoning quality is more important than response speed.',
             pricing: {
                 prompt: pricing(`$30.00 / 1M tokens`),
                 output: pricing(`$120.00 / 1M tokens`),
@@ -2910,7 +2856,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'o4-mini',
             modelName: 'o4-mini',
-            modelDescription:
+            modelDescription: 'Fast, cost-efficient reasoning model with 128K context window. Successor to o1-mini with improved analytical capabilities while maintaining speed advantages. Features enhanced mathematical reasoning and logical problem-solving at significantly lower cost than full reasoning models.',
             pricing: {
                 prompt: pricing(`$4.00 / 1M tokens`),
                 output: pricing(`$16.00 / 1M tokens`),
@@ -2922,7 +2868,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'o3-deep-research',
             modelName: 'o3-deep-research',
-            modelDescription:
+            modelDescription: 'Most powerful deep research model with 128K context window. Specialized for comprehensive research tasks, literature analysis, and complex information synthesis. Features advanced citation capabilities and enhanced factual accuracy for academic and professional research applications.',
             pricing: {
                 prompt: pricing(`$25.00 / 1M tokens`),
                 output: pricing(`$100.00 / 1M tokens`),
@@ -2934,7 +2880,7 @@
             modelVariant: 'CHAT',
             modelTitle: 'o4-mini-deep-research',
             modelName: 'o4-mini-deep-research',
-            modelDescription:
+            modelDescription: 'Faster, more affordable deep research model with 128K context window. Balances research capabilities with cost efficiency, offering good performance on literature review, fact-checking, and information synthesis tasks at a more accessible price point.',
             pricing: {
                 prompt: pricing(`$12.00 / 1M tokens`),
                 output: pricing(`$48.00 / 1M tokens`),
@@ -3390,6 +3336,14 @@
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */
 
+/**
+ * Profile for Azure OpenAI provider
+ */
+const AZURE_OPENAI_PROVIDER_PROFILE = {
+    name: 'AZURE_OPENAI',
+    fullname: 'Azure OpenAI',
+    color: '#0078d4',
+};
 /**
  * Execution Tools for calling Azure OpenAI API.
  *
@@ -3418,6 +3372,9 @@
     get description() {
         return 'Use all models trained by OpenAI provided by Azure';
     }
+    get profile() {
+        return AZURE_OPENAI_PROVIDER_PROFILE;
+    }
     async getClient() {
         if (this.client === null) {
             this.client = new openai.OpenAIClient(`https://${this.options.resourceName}.openai.azure.com/`, new openai.AzureKeyCredential(this.options.apiKey));
@@ -3815,6 +3772,14 @@
     }
 }
 
+/**
+ * Profile for Vercel AI adapter
+ */
+const VERCEL_PROVIDER_PROFILE = {
+    name: 'VERCEL',
+    fullname: 'Vercel AI',
+    color: '#000000',
+};
 /**
  * Adapter which creates Promptbook execution tools from Vercel provider
  *
@@ -3837,6 +3802,7 @@
     return {
         title,
         description,
+        profile: VERCEL_PROVIDER_PROFILE,
         checkConfiguration() {
             // Note: There is no way how to check configuration of Vercel provider
             return Promise.resolve();
|
|
4118
4084
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
4119
4085
|
*/
|
4120
4086
|
|
4087
|
+
/**
|
4088
|
+
* Profile for Deepseek provider
|
4089
|
+
*/
|
4090
|
+
const DEEPSEEK_PROVIDER_PROFILE = {
|
4091
|
+
name: 'DEEPSEEK',
|
4092
|
+
fullname: 'DeepSeek',
|
4093
|
+
color: '#7c3aed',
|
4094
|
+
};
|
4121
4095
|
/**
|
4122
4096
|
* Execution Tools for calling Deepseek API.
|
4123
4097
|
*
|
@@ -4135,13 +4109,17 @@
|
|
4135
4109
|
...options,
|
4136
4110
|
// apiKey: process.env.DEEPSEEK_GENERATIVE_AI_API_KEY,
|
4137
4111
|
});
|
4138
|
-
|
4112
|
+
const baseTools = createExecutionToolsFromVercelProvider({
|
4139
4113
|
title: 'Deepseek',
|
4140
4114
|
description: 'Implementation of Deepseek models',
|
4141
4115
|
vercelProvider: deepseekVercelProvider,
|
4142
4116
|
availableModels: DEEPSEEK_MODELS,
|
4143
4117
|
...options,
|
4144
4118
|
});
|
4119
|
+
return {
|
4120
|
+
...baseTools,
|
4121
|
+
profile: DEEPSEEK_PROVIDER_PROFILE,
|
4122
|
+
};
|
4145
4123
|
}, {
|
4146
4124
|
packageName: '@promptbook/deepseek',
|
4147
4125
|
className: 'DeepseekExecutionTools',
|
@@ -4443,6 +4421,14 @@
  * Note: [💞] Ignore a discrepancy between file name and entity name
  */
 
+/**
+ * Profile for Google Gemini provider
+ */
+const GOOGLE_PROVIDER_PROFILE = {
+    name: 'GOOGLE',
+    fullname: 'Google Gemini',
+    color: '#4285f4',
+};
 /**
  * Execution Tools for calling Google Gemini API.
  *
@@ -4460,13 +4446,17 @@
         ...options,
         /// apiKey: process.env.GOOGLE_GENERATIVE_AI_API_KEY,
     });
-
+    const baseTools = createExecutionToolsFromVercelProvider({
         title: 'Google',
         description: 'Implementation of Google models',
         vercelProvider: googleGeminiVercelProvider,
         availableModels: GOOGLE_MODELS,
         ...options,
     });
+    return {
+        ...baseTools,
+        profile: GOOGLE_PROVIDER_PROFILE,
+    };
 }, {
     packageName: '@promptbook/google',
     className: 'GoogleExecutionTools',
@@ -4634,6 +4624,62 @@
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
 
+/**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+function parseUnsupportedParameterError(errorMessage) {
+    // Pattern to match "Unsupported value: 'parameter' does not support ..."
+    const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+    if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+        return unsupportedValueMatch[1];
+    }
+    // Pattern to match "'parameter' of type ... is not supported with this model"
+    const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+    if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+        return parameterTypeMatch[1];
+    }
+    return null;
+}
+/**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+    const newRequirements = { ...modelRequirements };
+    // Map of parameter names that might appear in error messages to ModelRequirements properties
+    const parameterMap = {
+        temperature: 'temperature',
+        max_tokens: 'maxTokens',
+        maxTokens: 'maxTokens',
+        seed: 'seed',
+    };
+    const propertyToRemove = parameterMap[unsupportedParameter];
+    if (propertyToRemove && propertyToRemove in newRequirements) {
+        delete newRequirements[propertyToRemove];
+    }
+    return newRequirements;
+}
+/**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+function isUnsupportedParameterError(error) {
+    const errorMessage = error.message.toLowerCase();
+    return (errorMessage.includes('unsupported value:') ||
+        errorMessage.includes('is not supported with this model') ||
+        errorMessage.includes('does not support'));
+}
+
 /**
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
  *
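The three helpers added above drive the new automatic retry: `isUnsupportedParameterError` detects the error class, `parseUnsupportedParameterError` extracts the offending parameter name, and `removeUnsupportedModelRequirement` drops it from the requirements. A small sketch of the flow on an illustrative error message (the message text below is hypothetical):

    const message = "Unsupported value: 'temperature' does not support 0.7 with this model.";

    // parseUnsupportedParameterError matches the first of its two patterns here:
    const match = message.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
    const unsupportedParameter = match?.[1] ?? null; // -> 'temperature'

    // removeUnsupportedModelRequirement then maps the name onto a
    // ModelRequirements property (max_tokens/maxTokens -> maxTokens)
    // and deletes it before the request is retried:
    const requirements = { modelVariant: 'CHAT', temperature: 0.7, maxTokens: 1024 };
    const { temperature, ...retried } = requirements; // retried request omits temperature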
@@ -4651,6 +4697,10 @@
          * OpenAI API client.
          */
         this.client = null;
+        /**
+         * Tracks models and parameters that have already been retried to prevent infinite loops
+         */
+        this.retriedUnsupportedParameters = new Set();
         // TODO: Allow configuring rate limits via options
         this.limiter = new Bottleneck__default["default"]({
             minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
@@ -4712,21 +4762,27 @@
      * Calls OpenAI compatible API to use a chat model.
      */
     async callChatModel(prompt) {
+        return this.callChatModelWithRetry(prompt, prompt.modelRequirements);
+    }
+    /**
+     * Internal method that handles parameter retry for chat model calls
+     */
+    async callChatModelWithRetry(prompt, currentModelRequirements) {
         var _a;
         if (this.options.isVerbose) {
-            console.info(`💬 ${this.title} callChatModel call`, { prompt });
+            console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
         }
-        const { content, parameters,
+        const { content, parameters, format } = prompt;
         const client = await this.getClient();
         // TODO: [☂] Use here more modelRequirements
-        if (
+        if (currentModelRequirements.modelVariant !== 'CHAT') {
             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
         }
-        const modelName =
+        const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens:
-            temperature:
+            max_tokens: currentModelRequirements.maxTokens,
+            temperature: currentModelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
         }; // <- TODO: [💩] Guard here types better
@@ -4741,12 +4797,12 @@
         const rawRequest = {
             ...modelSettings,
             messages: [
-                ...(
+                ...(currentModelRequirements.systemMessage === undefined
                     ? []
                     : [
                         {
                             role: 'system',
-                            content:
+                            content: currentModelRequirements.systemMessage,
                         },
                     ]),
                 {
@@ -4760,69 +4816,110 @@
         if (this.options.isVerbose) {
             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
-
-
-
-
+        try {
+            const rawResponse = await this.limiter
+                .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+                .catch((error) => {
+                assertsError(error);
+                if (this.options.isVerbose) {
+                    console.info(colors__default["default"].bgRed('error'), error);
+                }
+                throw error;
+            });
             if (this.options.isVerbose) {
-                console.info(colors__default["default"].
+                console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
             }
-
-
-
-
-
-
-
-
-
-
-
+            const complete = $getCurrentDate();
+            if (!rawResponse.choices[0]) {
+                throw new PipelineExecutionError(`No choises from ${this.title}`);
+            }
+            if (rawResponse.choices.length > 1) {
+                // TODO: This should be maybe only warning
+                throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+            }
+            const resultContent = rawResponse.choices[0].message.content;
+            const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+            if (resultContent === null) {
+                throw new PipelineExecutionError(`No response message from ${this.title}`);
+            }
+            return exportJson({
+                name: 'promptResult',
+                message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+                order: [],
+                value: {
+                    content: resultContent,
+                    modelName: rawResponse.model || modelName,
+                    timing: {
+                        start,
+                        complete,
+                    },
+                    usage,
+                    rawPromptContent,
+                    rawRequest,
+                    rawResponse,
+                    // <- [🗯]
+                },
+            });
         }
-
-
-
+        catch (error) {
+            assertsError(error);
+            // Check if this is an unsupported parameter error
+            if (!isUnsupportedParameterError(error)) {
+                throw error;
+            }
+            // Parse which parameter is unsupported
+            const unsupportedParameter = parseUnsupportedParameterError(error.message);
+            if (!unsupportedParameter) {
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                }
+                throw error;
+            }
+            // Create a unique key for this model + parameter combination to prevent infinite loops
+            const retryKey = `${modelName}-${unsupportedParameter}`;
+            if (this.retriedUnsupportedParameters.has(retryKey)) {
+                // Already retried this parameter, throw the error
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                }
+                throw error;
+            }
+            // Mark this parameter as retried
+            this.retriedUnsupportedParameters.add(retryKey);
+            // Log warning in verbose mode
+            if (this.options.isVerbose) {
+                console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+            }
+            // Remove the unsupported parameter and retry
+            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+            return this.callChatModelWithRetry(prompt, modifiedModelRequirements);
         }
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: {
-                    start,
-                    complete,
-                },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-                // <- [🗯]
-            },
-        });
     }
     /**
      * Calls OpenAI API to use a complete model.
      */
     async callCompletionModel(prompt) {
+        return this.callCompletionModelWithRetry(prompt, prompt.modelRequirements);
+    }
+    /**
+     * Internal method that handles parameter retry for completion model calls
+     */
+    async callCompletionModelWithRetry(prompt, currentModelRequirements) {
         var _a;
         if (this.options.isVerbose) {
-            console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
+            console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
         }
-        const { content, parameters
+        const { content, parameters } = prompt;
         const client = await this.getClient();
         // TODO: [☂] Use here more modelRequirements
-        if (
+        if (currentModelRequirements.modelVariant !== 'COMPLETION') {
             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
         }
-        const modelName =
+        const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
         const modelSettings = {
             model: modelName,
-            max_tokens:
-            temperature:
+            max_tokens: currentModelRequirements.maxTokens,
+            temperature: currentModelRequirements.temperature,
             // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
             // <- Note: [🧆]
         };
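Both retry wrappers guard against loops with the per-instance `retriedUnsupportedParameters` set keyed by model and parameter, so each combination is retried at most once. A minimal sketch of just that guard, extracted from the logic above (the helper name is hypothetical; the bundle keeps this inline):

    const retriedUnsupportedParameters = new Set<string>();

    function shouldRetry(modelName: string, unsupportedParameter: string): boolean {
        const retryKey = `${modelName}-${unsupportedParameter}`;
        if (retriedUnsupportedParameters.has(retryKey)) {
            return false; // already retried once - rethrow the original error
        }
        retriedUnsupportedParameters.add(retryKey); // remember, then retry once
        return true;
    }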
@@ -4836,46 +4933,81 @@
         if (this.options.isVerbose) {
             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
         }
-
-
-
+        try {
+            const rawResponse = await this.limiter
+                .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
+                .catch((error) => {
+                assertsError(error);
+                if (this.options.isVerbose) {
+                    console.info(colors__default["default"].bgRed('error'), error);
+                }
+                throw error;
+            });
             if (this.options.isVerbose) {
-                console.info(colors__default["default"].
+                console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
             }
-
-
-
-
-
-
-
+            const complete = $getCurrentDate();
+            if (!rawResponse.choices[0]) {
+                throw new PipelineExecutionError(`No choises from ${this.title}`);
+            }
+            if (rawResponse.choices.length > 1) {
+                // TODO: This should be maybe only warning
+                throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+            }
+            const resultContent = rawResponse.choices[0].text;
+            const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+            return exportJson({
+                name: 'promptResult',
+                message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
+                order: [],
+                value: {
+                    content: resultContent,
+                    modelName: rawResponse.model || modelName,
+                    timing: {
+                        start,
+                        complete,
+                    },
+                    usage,
+                    rawPromptContent,
+                    rawRequest,
+                    rawResponse,
+                    // <- [🗯]
+                },
+            });
         }
-
-
-
+        catch (error) {
+            assertsError(error);
+            // Check if this is an unsupported parameter error
+            if (!isUnsupportedParameterError(error)) {
+                throw error;
+            }
+            // Parse which parameter is unsupported
+            const unsupportedParameter = parseUnsupportedParameterError(error.message);
+            if (!unsupportedParameter) {
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                }
+                throw error;
+            }
+            // Create a unique key for this model + parameter combination to prevent infinite loops
+            const retryKey = `${modelName}-${unsupportedParameter}`;
+            if (this.retriedUnsupportedParameters.has(retryKey)) {
+                // Already retried this parameter, throw the error
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                }
+                throw error;
+            }
+            // Mark this parameter as retried
+            this.retriedUnsupportedParameters.add(retryKey);
+            // Log warning in verbose mode
+            if (this.options.isVerbose) {
+                console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+            }
+            // Remove the unsupported parameter and retry
+            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+            return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements);
         }
-        const resultContent = rawResponse.choices[0].text;
-        const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
-        return exportJson({
-            name: 'promptResult',
-            message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
-            order: [],
-            value: {
-                content: resultContent,
-                modelName: rawResponse.model || modelName,
-                timing: {
-                    start,
-                    complete,
-                },
-                usage,
-                rawPromptContent,
-                rawRequest,
-                rawResponse,
-                // <- [🗯]
-            },
-        });
     }
     /**
      * Calls OpenAI compatible API to use a embedding model
|
|
4901
5033
|
console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
|
4902
5034
|
}
|
4903
5035
|
const rawResponse = await this.limiter
|
4904
|
-
.schedule(() => this.
|
5036
|
+
.schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
|
4905
5037
|
.catch((error) => {
|
4906
5038
|
assertsError(error);
|
4907
5039
|
if (this.options.isVerbose) {
|
@@ -4963,7 +5095,7 @@
|
|
4963
5095
|
/**
|
4964
5096
|
* Makes a request with retry logic for network errors like ECONNRESET
|
4965
5097
|
*/
|
4966
|
-
async
|
5098
|
+
async makeRequestWithNetworkRetry(requestFn) {
|
4967
5099
|
let lastError;
|
4968
5100
|
for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
|
4969
5101
|
try {
|
@@ -4975,8 +5107,8 @@
|
|
4975
5107
|
// Check if this is a retryable network error
|
4976
5108
|
const isRetryableError = this.isRetryableNetworkError(error);
|
4977
5109
|
if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
|
4978
|
-
if (this.options.isVerbose) {
|
4979
|
-
console.info(colors__default["default"].bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
|
5110
|
+
if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
|
5111
|
+
console.info(colors__default["default"].bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
|
4980
5112
|
}
|
4981
5113
|
throw error;
|
4982
5114
|
}
|
@@ -4986,7 +5118,7 @@
|
|
4986
5118
|
const jitterDelay = Math.random() * 500; // Add some randomness
|
4987
5119
|
const totalDelay = backoffDelay + jitterDelay;
|
4988
5120
|
if (this.options.isVerbose) {
|
4989
|
-
console.info(colors__default["default"].bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
|
5121
|
+
console.info(colors__default["default"].bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
|
4990
5122
|
}
|
4991
5123
|
// Wait before retrying
|
4992
5124
|
await new Promise((resolve) => setTimeout(resolve, totalDelay));
|
@@ -5035,6 +5167,7 @@
|
|
5035
5167
|
* TODO: [🛄] Maybe make custom `OpenAiCompatibleError`
|
5036
5168
|
* TODO: [🧠][🈁] Maybe use `isDeterministic` from options
|
5037
5169
|
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
|
5170
|
+
* TODO: [🧠][🦢] Make reverse adapter from LlmExecutionTools to OpenAI-compatible:
|
5038
5171
|
*/
|
5039
5172
|
|
5040
5173
|
/**
|
@@ -5290,6 +5423,14 @@
|
|
5290
5423
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
5291
5424
|
*/
|
5292
5425
|
|
5426
|
+
/**
|
5427
|
+
* Profile for Ollama provider
|
5428
|
+
*/
|
5429
|
+
const OLLAMA_PROVIDER_PROFILE = {
|
5430
|
+
name: 'OLLAMA',
|
5431
|
+
fullname: 'Ollama',
|
5432
|
+
color: '#059669',
|
5433
|
+
};
|
5293
5434
|
/**
|
5294
5435
|
* Execution Tools for calling Ollama API
|
5295
5436
|
*
|
@@ -5312,6 +5453,9 @@
|
|
5312
5453
|
get description() {
|
5313
5454
|
return 'Use all models provided by Ollama';
|
5314
5455
|
}
|
5456
|
+
get profile() {
|
5457
|
+
return OLLAMA_PROVIDER_PROFILE;
|
5458
|
+
}
|
5315
5459
|
/**
|
5316
5460
|
* List all available models (non dynamically)
|
5317
5461
|
*
|
@@ -5515,6 +5659,14 @@
|
|
5515
5659
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
5516
5660
|
*/
|
5517
5661
|
|
5662
|
+
/**
|
5663
|
+
* Profile for OpenAI provider
|
5664
|
+
*/
|
5665
|
+
const OPENAI_PROVIDER_PROFILE = {
|
5666
|
+
name: 'OPENAI',
|
5667
|
+
fullname: 'OpenAI GPT',
|
5668
|
+
color: '#10a37f',
|
5669
|
+
};
|
5518
5670
|
/**
|
5519
5671
|
* Execution Tools for calling OpenAI API
|
5520
5672
|
*
|
@@ -5537,7 +5689,7 @@
|
|
5537
5689
|
return 'Use all models provided by OpenAI';
|
5538
5690
|
}
|
5539
5691
|
get profile() {
|
5540
|
-
return
|
5692
|
+
return OPENAI_PROVIDER_PROFILE;
|
5541
5693
|
}
|
5542
5694
|
/*
|
5543
5695
|
Note: Commenting this out to avoid circular dependency
|
@@ -6367,11 +6519,12 @@
|
|
6367
6519
|
catch (error) {
|
6368
6520
|
// Note: If we can't create cache directory, continue without it
|
6369
6521
|
// This handles read-only filesystems, permission issues, and missing parent directories
|
6370
|
-
if (error instanceof Error &&
|
6371
|
-
error.message.includes('
|
6372
|
-
|
6373
|
-
|
6374
|
-
|
6522
|
+
if (error instanceof Error &&
|
6523
|
+
(error.message.includes('EROFS') ||
|
6524
|
+
error.message.includes('read-only') ||
|
6525
|
+
error.message.includes('EACCES') ||
|
6526
|
+
error.message.includes('EPERM') ||
|
6527
|
+
error.message.includes('ENOENT'))) ;
|
6375
6528
|
else {
|
6376
6529
|
// Re-throw other unexpected errors
|
6377
6530
|
throw error;
|
@@ -7713,6 +7866,33 @@
|
|
7713
7866
|
* TODO: [👷♂️] @@@ Manual about construction of llmTools
|
7714
7867
|
*/
|
7715
7868
|
|
7869
|
+
/**
|
7870
|
+
* Takes an item or an array of items and returns an array of items
|
7871
|
+
*
|
7872
|
+
* 1) Any item except array and undefined returns array with that one item (also null)
|
7873
|
+
* 2) Undefined returns empty array
|
7874
|
+
* 3) Array returns itself
|
7875
|
+
*
|
7876
|
+
* @private internal utility
|
7877
|
+
*/
|
7878
|
+
function arrayableToArray(input) {
|
7879
|
+
if (input === undefined) {
|
7880
|
+
return [];
|
7881
|
+
}
|
7882
|
+
if (input instanceof Array) {
|
7883
|
+
return input;
|
7884
|
+
}
|
7885
|
+
return [input];
|
7886
|
+
}
|
7887
|
+
|
7888
|
+
/**
|
7889
|
+
* Profile for Multiple providers aggregation
|
7890
|
+
*/
|
7891
|
+
const MULTIPLE_PROVIDER_PROFILE = {
|
7892
|
+
name: 'MULTIPLE',
|
7893
|
+
fullname: 'Multiple Providers',
|
7894
|
+
color: '#6366f1',
|
7895
|
+
};
|
7716
7896
|
/**
|
7717
7897
|
* Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
|
7718
7898
|
*
|
@@ -7723,12 +7903,10 @@
|
|
7723
7903
|
/**
|
7724
7904
|
* Gets array of execution tools in order of priority
|
7725
7905
|
*/
|
7726
|
-
constructor(...llmExecutionTools) {
|
7906
|
+
constructor(title, ...llmExecutionTools) {
|
7907
|
+
this.title = title;
|
7727
7908
|
this.llmExecutionTools = llmExecutionTools;
|
7728
7909
|
}
|
7729
|
-
get title() {
|
7730
|
-
return 'Multiple LLM Providers';
|
7731
|
-
}
|
7732
7910
|
get description() {
|
7733
7911
|
const innerModelsTitlesAndDescriptions = this.llmExecutionTools
|
7734
7912
|
.map(({ title, description }, index) => {
|
@@ -7750,7 +7928,7 @@
|
|
7750
7928
|
`);
|
7751
7929
|
}
|
7752
7930
|
get profile() {
|
7753
|
-
return
|
7931
|
+
return MULTIPLE_PROVIDER_PROFILE;
|
7754
7932
|
}
|
7755
7933
|
/**
|
7756
7934
|
* Check the configuration of all execution tools
|
@@ -7814,7 +7992,7 @@
|
|
7814
7992
|
return await llmExecutionTools.callEmbeddingModel(prompt);
|
7815
7993
|
// <- case [🤖]:
|
7816
7994
|
default:
|
7817
|
-
throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
|
7995
|
+
throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
|
7818
7996
|
}
|
7819
7997
|
}
|
7820
7998
|
catch (error) {
|
@@ -7835,7 +8013,7 @@
|
|
7835
8013
|
// 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
|
7836
8014
|
// 3) ...
|
7837
8015
|
spaceTrim__default["default"]((block) => `
|
7838
|
-
All execution tools failed:
|
8016
|
+
All execution tools of ${this.title} failed:
|
7839
8017
|
|
7840
8018
|
${block(errors
|
7841
8019
|
.map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
|
@@ -7844,11 +8022,11 @@
|
|
7844
8022
|
`));
|
7845
8023
|
}
|
7846
8024
|
else if (this.llmExecutionTools.length === 0) {
|
7847
|
-
throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools
|
8025
|
+
throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
|
7848
8026
|
}
|
7849
8027
|
else {
|
7850
8028
|
throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
|
7851
|
-
You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
|
8029
|
+
You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
|
7852
8030
|
|
7853
8031
|
Available \`LlmExecutionTools\`:
|
7854
8032
|
${block(this.description)}
|
@@ -7878,7 +8056,7 @@
|
|
7878
8056
|
*
|
7879
8057
|
* @public exported from `@promptbook/core`
|
7880
8058
|
*/
|
7881
|
-
function joinLlmExecutionTools(...llmExecutionTools) {
|
8059
|
+
function joinLlmExecutionTools(title, ...llmExecutionTools) {
|
7882
8060
|
if (llmExecutionTools.length === 0) {
|
7883
8061
|
const warningMessage = spaceTrim__default["default"](`
|
7884
8062
|
You have not provided any \`LlmExecutionTools\`
|
@@ -7910,30 +8088,27 @@
|
|
7910
8088
|
};
|
7911
8089
|
*/
|
7912
8090
|
}
|
7913
|
-
return new MultipleLlmExecutionTools(...llmExecutionTools);
|
8091
|
+
return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
|
7914
8092
|
}
|
7915
8093
|
/**
|
7916
8094
|
* TODO: [👷♂️] @@@ Manual about construction of llmTools
|
7917
8095
|
*/
|
7918
8096
|
|
7919
8097
|
/**
|
7920
|
-
*
|
8098
|
+
* Just returns the given `LlmExecutionTools` or joins multiple into one
|
7921
8099
|
*
|
7922
|
-
*
|
7923
|
-
* 2) Undefined returns empty array
|
7924
|
-
* 3) Array returns itself
|
7925
|
-
*
|
7926
|
-
* @private internal utility
|
8100
|
+
* @public exported from `@promptbook/core`
|
7927
8101
|
*/
|
7928
|
-
function
|
7929
|
-
|
7930
|
-
|
7931
|
-
|
7932
|
-
|
7933
|
-
|
7934
|
-
}
|
7935
|
-
return [input];
|
8102
|
+
function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
|
8103
|
+
const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
|
8104
|
+
const llmTools = _llms.length === 1
|
8105
|
+
? _llms[0]
|
8106
|
+
: joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
|
8107
|
+
return llmTools;
|
7936
8108
|
}
|
8109
|
+
/**
|
8110
|
+
* TODO: [👷♂️] @@@ Manual about construction of llmTools
|
8111
|
+
*/
|
7937
8112
|
|
7938
8113
|
/**
|
7939
8114
|
* Prepares the persona for the pipeline
|
@@ -7952,8 +8127,7 @@
|
|
7952
8127
|
pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
|
7953
8128
|
tools,
|
7954
8129
|
});
|
7955
|
-
const
|
7956
|
-
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
8130
|
+
const llmTools = getSingleLlmExecutionTools(tools.llm);
|
7957
8131
|
const availableModels = (await llmTools.listModels())
|
7958
8132
|
.filter(({ modelVariant }) => modelVariant === 'CHAT')
|
7959
8133
|
.map(({ modelName, modelDescription }) => ({
|
@@ -7997,6 +8171,7 @@
|
|
7997
8171
|
};
|
7998
8172
|
}
|
7999
8173
|
/**
|
8174
|
+
* TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
|
8000
8175
|
* TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
|
8001
8176
|
* TODO: [🏢] Check validity of `modelName` in pipeline
|
8002
8177
|
* TODO: [🏢] Check validity of `systemMessage` in pipeline
|
@@ -8569,9 +8744,7 @@
|
|
8569
8744
|
if (tools === undefined || tools.llm === undefined) {
|
8570
8745
|
throw new MissingToolsError('LLM tools are required for preparing the pipeline');
|
8571
8746
|
}
|
8572
|
-
|
8573
|
-
const _llms = arrayableToArray(tools.llm);
|
8574
|
-
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
8747
|
+
const llmTools = getSingleLlmExecutionTools(tools.llm);
|
8575
8748
|
const llmToolsWithUsage = countUsage(llmTools);
|
8576
8749
|
// <- TODO: [🌯]
|
8577
8750
|
/*
|
@@ -9441,9 +9614,7 @@
|
|
9441
9614
|
$scriptPipelineExecutionErrors: [],
|
9442
9615
|
$failedResults: [], // Track all failed attempts
|
9443
9616
|
};
|
9444
|
-
|
9445
|
-
const _llms = arrayableToArray(tools.llm);
|
9446
|
-
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
9617
|
+
const llmTools = getSingleLlmExecutionTools(tools.llm);
|
9447
9618
|
attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
|
9448
9619
|
const isJokerAttempt = attemptIndex < 0;
|
9449
9620
|
const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
|
@@ -9963,9 +10134,7 @@
|
|
9963
10134
|
return ''; // <- Note: Np knowledge present, return empty string
|
9964
10135
|
}
|
9965
10136
|
try {
|
9966
|
-
|
9967
|
-
const _llms = arrayableToArray(tools.llm);
|
9968
|
-
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
10137
|
+
const llmTools = getSingleLlmExecutionTools(tools.llm);
|
9969
10138
|
const taskEmbeddingPrompt = {
|
9970
10139
|
title: 'Knowledge Search',
|
9971
10140
|
modelRequirements: {
|
@@ -10566,13 +10735,13 @@
|
|
10566
10735
|
// Calculate and update tldr based on pipeline progress
|
10567
10736
|
const cv = newOngoingResult;
|
10568
10737
|
// Calculate progress based on parameters resolved vs total parameters
|
10569
|
-
const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
|
10738
|
+
const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
|
10570
10739
|
let resolvedParameters = 0;
|
10571
10740
|
let currentTaskTitle = '';
|
10572
10741
|
// Get the resolved parameters from output parameters
|
10573
10742
|
if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
|
10574
10743
|
// Count how many output parameters have non-empty values
|
10575
|
-
resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
|
10744
|
+
resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
|
10576
10745
|
}
|
10577
10746
|
// Try to determine current task from execution report
|
10578
10747
|
if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
|
@@ -10682,9 +10851,7 @@
|
|
10682
10851
|
throw new MissingToolsError('LLM tools are required for scraping external files');
|
10683
10852
|
// <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
|
10684
10853
|
}
|
10685
|
-
|
10686
|
-
const _llms = arrayableToArray(llm);
|
10687
|
-
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
10854
|
+
const llmTools = getSingleLlmExecutionTools(llm);
|
10688
10855
|
// TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
|
10689
10856
|
const collection = createCollectionFromJson(...PipelineCollection);
|
10690
10857
|
const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({
|
@@ -11461,11 +11628,12 @@
|
|
11461
11628
|
catch (error) {
|
11462
11629
|
// Note: If we can't write to cache, we'll continue without caching
|
11463
11630
|
// This handles read-only filesystems like Vercel
|
11464
|
-
if (error instanceof Error &&
|
11465
|
-
error.message.includes('
|
11466
|
-
|
11467
|
-
|
11468
|
-
|
11631
|
+
if (error instanceof Error &&
|
11632
|
+
(error.message.includes('EROFS') ||
|
11633
|
+
error.message.includes('read-only') ||
|
11634
|
+
error.message.includes('EACCES') ||
|
11635
|
+
error.message.includes('EPERM') ||
|
11636
|
+
error.message.includes('ENOENT'))) ;
|
11469
11637
|
else {
|
11470
11638
|
// Re-throw other unexpected errors
|
11471
11639
|
throw error;
|
@@ -11765,11 +11933,12 @@
|
|
11765
11933
|
catch (error) {
|
11766
11934
|
// Note: If we can't write to cache, we'll continue without caching
|
11767
11935
|
// This handles read-only filesystems like Vercel
|
11768
|
-
if (error instanceof Error &&
|
11769
|
-
error.message.includes('
|
11770
|
-
|
11771
|
-
|
11772
|
-
|
11936
|
+
if (error instanceof Error &&
|
11937
|
+
(error.message.includes('EROFS') ||
|
11938
|
+
error.message.includes('read-only') ||
|
11939
|
+
error.message.includes('EACCES') ||
|
11940
|
+
error.message.includes('EPERM') ||
|
11941
|
+
error.message.includes('ENOENT'))) ;
|
11773
11942
|
else {
|
11774
11943
|
// Re-throw other unexpected errors
|
11775
11944
|
throw error;
|
@@ -12523,11 +12692,12 @@
|
|
12523
12692
|
catch (error) {
|
12524
12693
|
// Note: If we can't write to cache, silently ignore the error
|
12525
12694
|
// This handles read-only filesystems, permission issues, and missing parent directories
|
12526
|
-
if (error instanceof Error &&
|
12527
|
-
error.message.includes('
|
12528
|
-
|
12529
|
-
|
12530
|
-
|
12695
|
+
if (error instanceof Error &&
|
12696
|
+
(error.message.includes('EROFS') ||
|
12697
|
+
error.message.includes('read-only') ||
|
12698
|
+
error.message.includes('EACCES') ||
|
12699
|
+
error.message.includes('EPERM') ||
|
12700
|
+
error.message.includes('ENOENT'))) {
|
12531
12701
|
// Silently ignore filesystem errors - caching is optional
|
12532
12702
|
return;
|
12533
12703
|
}
|
@@ -12820,7 +12990,7 @@
|
|
12820
12990
|
* @public exported from `@promptbook/core`
|
12821
12991
|
*/
|
12822
12992
|
function createLlmToolsFromConfiguration(configuration, options = {}) {
|
12823
|
-
const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
|
12993
|
+
const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
|
12824
12994
|
const llmTools = configuration.map((llmConfiguration) => {
|
12825
12995
|
const registeredItem = $llmToolsRegister
|
12826
12996
|
.list()
|
@@ -12852,7 +13022,7 @@
|
|
12852
13022
|
...llmConfiguration.options,
|
12853
13023
|
});
|
12854
13024
|
});
|
12855
|
-
return joinLlmExecutionTools(...llmTools);
|
13025
|
+
return joinLlmExecutionTools(title, ...llmTools);
|
12856
13026
|
}
|
12857
13027
|
/**
|
12858
13028
|
* TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
|
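`createLlmToolsFromConfiguration` now accepts an optional `title` in its options, defaulting to 'LLM Tools from Configuration', and forwards it to `joinLlmExecutionTools`. A hypothetical call:

    const llmTools = createLlmToolsFromConfiguration(configuration, {
        title: 'Production LLM tools',
        isVerbose: false,
    });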
@@ -12969,7 +13139,9 @@
         });
     }
     else if (strategy === 'BRING_YOUR_OWN_KEYS') {
-        llmExecutionTools = await $provideLlmToolsFromEnv(
+        llmExecutionTools = await $provideLlmToolsFromEnv({
+            title: 'LLM Tools for wizard or CLI with BYOK strategy',
+        });
     }
     else {
         throw new UnexpectedError(`\`$provideLlmToolsForWizardOrCli\` wrong strategy "${strategy}"`);
@@ -17002,7 +17174,7 @@
         throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
     }
     const fs = $provideFilesystemForNode();
-    const llm = await $provideLlmToolsFromEnv(options);
+    const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
     const executables = await $provideExecutablesForNode();
     const tools = {
         llm,
@@ -17411,11 +17583,12 @@
     catch (error) {
         // Note: Ignore filesystem errors (like EROFS on read-only systems like Vercel)
         // The compiled book can still be used even if it can't be cached
-        if (error instanceof Error &&
-            error.message.includes('
-
-
-
+        if (error instanceof Error &&
+            (error.message.includes('EROFS') ||
+                error.message.includes('read-only') ||
+                error.message.includes('EACCES') ||
+                error.message.includes('EPERM') ||
+                error.message.includes('ENOENT'))) ;
         else {
             // Re-throw other unexpected errors
             throw error;