@promptbook/remote-server 0.101.0-2 → 0.101.0-21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +45 -0
- package/esm/index.es.js +58 -109
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +20 -0
- package/esm/typings/src/_packages/core.index.d.ts +14 -0
- package/esm/typings/src/_packages/types.index.d.ts +14 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +41 -3
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
- package/esm/typings/src/book-2.0/agent-source/parseParameters.d.ts +13 -0
- package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +35 -0
- package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +33 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +34 -0
- package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +35 -0
- package/esm/typings/src/book-2.0/commitments/META/META.d.ts +56 -0
- package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +25 -10
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
- package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +34 -0
- package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +2 -8
- package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
- package/esm/typings/src/book-2.0/commitments/index.d.ts +7 -3
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +63 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/index.d.ts +3 -0
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +18 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -12
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +29 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
- package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
- package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
- package/esm/typings/src/execution/PromptResult.d.ts +2 -4
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
- package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
- package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
- package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
- package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
- package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
- package/esm/typings/src/personas/preparePersona.d.ts +1 -0
- package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
- package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
- package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +1 -1
- package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +58 -109
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
- package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
- package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
- package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
- package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
- package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
- /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/README.md
CHANGED
|
@@ -49,6 +49,51 @@ npm install @promptbook/remote-server
|
|
|
49
49
|
```
|
|
50
50
|
|
|
51
51
|
|
|
52
|
+
Remote server implementation for Promptbook, enabling distributed execution of promptbook pipelines across network boundaries with REST API and WebSocket support.
|
|
53
|
+
|
|
54
|
+
## 🎯 Purpose and Motivation
|
|
55
|
+
|
|
56
|
+
This package provides a remote server that allows promptbook pipelines to be executed over the network. It enables distributed architectures where promptbook execution can be centralized on powerful servers while clients can access the functionality remotely, making it ideal for scaling promptbook applications and providing API access to promptbook collections.
|
|
57
|
+
|
|
58
|
+
## 🔧 High-Level Functionality
|
|
59
|
+
|
|
60
|
+
The package provides remote server capabilities:
|
|
61
|
+
- **HTTP REST API**: RESTful endpoints for pipeline execution and management
|
|
62
|
+
- **WebSocket Support**: Real-time communication for streaming execution results
|
|
63
|
+
- **Authentication**: Support for both anonymous and application-based authentication
|
|
64
|
+
- **Pipeline Management**: Remote access to promptbook collections and pipelines
|
|
65
|
+
- **Execution Orchestration**: Distributed execution of promptbook pipelines
|
|
66
|
+
- **OpenAI Compatibility**: OpenAI-compatible API endpoints for seamless integration
|
|
67
|
+
|
|
68
|
+
## ✨ Key Features
|
|
69
|
+
|
|
70
|
+
- 🌐 **Remote Execution** - Execute promptbook pipelines over HTTP/WebSocket
|
|
71
|
+
- 🔐 **Authentication Modes** - Support for anonymous and application-based access
|
|
72
|
+
- 📡 **Real-time Communication** - WebSocket support for streaming results
|
|
73
|
+
- 🤖 **OpenAI Compatible** - Use promptbooks as OpenAI-compatible models
|
|
74
|
+
- 📈 **Scalable Architecture** - Distribute promptbook execution across servers
|
|
75
|
+
- 📚 **Pipeline Management** - Remote access to collections and individual pipelines
|
|
76
|
+
- 🛡️ **Security** - Configurable authentication and access control
|
|
77
|
+
- ⚡ **High Performance** - Optimized for concurrent pipeline execution
|
|
78
|
+
|
|
79
|
+
## 📦 Exported Entities
|
|
80
|
+
|
|
81
|
+
### Version Information
|
|
82
|
+
- `BOOK_LANGUAGE_VERSION` - Current book language version
|
|
83
|
+
- `PROMPTBOOK_ENGINE_VERSION` - Current engine version
|
|
84
|
+
|
|
85
|
+
### Server Management
|
|
86
|
+
- `startRemoteServer` - Start the remote promptbook server
|
|
87
|
+
|
|
88
|
+
### Configuration Types
|
|
89
|
+
- `RemoteServerOptions` - Configuration options for remote server (type)
|
|
90
|
+
|
|
91
|
+
### Authentication Types
|
|
92
|
+
- `Identification` - Base identification interface (type)
|
|
93
|
+
- `ApplicationModeIdentification` - Application mode identification (type)
|
|
94
|
+
- `AnonymousModeIdentification` - Anonymous mode identification (type)
|
|
95
|
+
|
|
96
|
+
> 💡 This package provides remote server functionality for promptbook applications. For the core functionality, see [@promptbook/core](#-packages) or install all packages with `npm i ptbk`
|
|
52
97
|
|
|
53
98
|
|
|
54
99
|
---
|
package/esm/index.es.js
CHANGED
|
@@ -31,7 +31,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
|
|
|
31
31
|
* @generated
|
|
32
32
|
* @see https://github.com/webgptorg/promptbook
|
|
33
33
|
*/
|
|
34
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.101.0-
|
|
34
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.101.0-21';
|
|
35
35
|
/**
|
|
36
36
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
37
37
|
* Note: [ð] Ignore a discrepancy between file name and entity name
|
|
@@ -2878,75 +2878,32 @@ function countUsage(llmTools) {
|
|
|
2878
2878
|
*/
|
|
2879
2879
|
|
|
2880
2880
|
/**
|
|
2881
|
-
*
|
|
2882
|
-
* These profiles represent each provider as a virtual persona in chat interfaces
|
|
2881
|
+
* Takes an item or an array of items and returns an array of items
|
|
2883
2882
|
*
|
|
2884
|
-
*
|
|
2883
|
+
* 1) Any item except array and undefined returns array with that one item (also null)
|
|
2884
|
+
* 2) Undefined returns empty array
|
|
2885
|
+
* 3) Array returns itself
|
|
2886
|
+
*
|
|
2887
|
+
* @private internal utility
|
|
2885
2888
|
*/
|
|
2886
|
-
|
|
2887
|
-
|
|
2888
|
-
|
|
2889
|
-
|
|
2890
|
-
|
|
2891
|
-
|
|
2892
|
-
}
|
|
2893
|
-
|
|
2894
|
-
|
|
2895
|
-
|
|
2896
|
-
color: '#d97706', // Anthropic's orange/amber color
|
|
2897
|
-
},
|
|
2898
|
-
AZURE_OPENAI: {
|
|
2899
|
-
name: 'AZURE_OPENAI',
|
|
2900
|
-
fullname: 'Azure OpenAI',
|
|
2901
|
-
color: '#0078d4', // Microsoft Azure blue
|
|
2902
|
-
},
|
|
2903
|
-
GOOGLE: {
|
|
2904
|
-
name: 'GOOGLE',
|
|
2905
|
-
fullname: 'Google Gemini',
|
|
2906
|
-
color: '#4285f4', // Google blue
|
|
2907
|
-
},
|
|
2908
|
-
DEEPSEEK: {
|
|
2909
|
-
name: 'DEEPSEEK',
|
|
2910
|
-
fullname: 'DeepSeek',
|
|
2911
|
-
color: '#7c3aed', // Purple color for DeepSeek
|
|
2912
|
-
},
|
|
2913
|
-
OLLAMA: {
|
|
2914
|
-
name: 'OLLAMA',
|
|
2915
|
-
fullname: 'Ollama',
|
|
2916
|
-
color: '#059669', // Emerald green for local models
|
|
2917
|
-
},
|
|
2918
|
-
REMOTE: {
|
|
2919
|
-
name: 'REMOTE',
|
|
2920
|
-
fullname: 'Remote Server',
|
|
2921
|
-
color: '#6b7280', // Gray for remote/proxy connections
|
|
2922
|
-
},
|
|
2923
|
-
MOCKED_ECHO: {
|
|
2924
|
-
name: 'MOCKED_ECHO',
|
|
2925
|
-
fullname: 'Echo (Test)',
|
|
2926
|
-
color: '#8b5cf6', // Purple for test/mock tools
|
|
2927
|
-
},
|
|
2928
|
-
MOCKED_FAKE: {
|
|
2929
|
-
name: 'MOCKED_FAKE',
|
|
2930
|
-
fullname: 'Fake LLM (Test)',
|
|
2931
|
-
color: '#ec4899', // Pink for fake/test tools
|
|
2932
|
-
},
|
|
2933
|
-
VERCEL: {
|
|
2934
|
-
name: 'VERCEL',
|
|
2935
|
-
fullname: 'Vercel AI',
|
|
2936
|
-
color: '#000000', // Vercel's black
|
|
2937
|
-
},
|
|
2938
|
-
MULTIPLE: {
|
|
2939
|
-
name: 'MULTIPLE',
|
|
2940
|
-
fullname: 'Multiple Providers',
|
|
2941
|
-
color: '#6366f1', // Indigo for combined/multiple providers
|
|
2942
|
-
},
|
|
2943
|
-
};
|
|
2889
|
+
function arrayableToArray(input) {
|
|
2890
|
+
if (input === undefined) {
|
|
2891
|
+
return [];
|
|
2892
|
+
}
|
|
2893
|
+
if (input instanceof Array) {
|
|
2894
|
+
return input;
|
|
2895
|
+
}
|
|
2896
|
+
return [input];
|
|
2897
|
+
}
|
|
2898
|
+
|
|
2944
2899
|
/**
|
|
2945
|
-
*
|
|
2946
|
-
* TODO: [ð] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
|
|
2947
|
-
* Note: [ð] Ignore a discrepancy between file name and entity name
|
|
2900
|
+
* Profile for Multiple providers aggregation
|
|
2948
2901
|
*/
|
|
2949
|
-
|
|
2902
|
+
const MULTIPLE_PROVIDER_PROFILE = {
|
|
2903
|
+
name: 'MULTIPLE',
|
|
2904
|
+
fullname: 'Multiple Providers',
|
|
2905
|
+
color: '#6366f1',
|
|
2906
|
+
};
|
|
2950
2907
|
/**
|
|
2951
2908
|
* Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
|
|
2952
2909
|
*
|
|
@@ -2957,12 +2914,10 @@ class MultipleLlmExecutionTools {
|
|
|
2957
2914
|
/**
|
|
2958
2915
|
* Gets array of execution tools in order of priority
|
|
2959
2916
|
*/
|
|
2960
|
-
constructor(...llmExecutionTools) {
|
|
2917
|
+
constructor(title, ...llmExecutionTools) {
|
|
2918
|
+
this.title = title;
|
|
2961
2919
|
this.llmExecutionTools = llmExecutionTools;
|
|
2962
2920
|
}
|
|
2963
|
-
get title() {
|
|
2964
|
-
return 'Multiple LLM Providers';
|
|
2965
|
-
}
|
|
2966
2921
|
get description() {
|
|
2967
2922
|
const innerModelsTitlesAndDescriptions = this.llmExecutionTools
|
|
2968
2923
|
.map(({ title, description }, index) => {
|
|
@@ -2984,7 +2939,7 @@ class MultipleLlmExecutionTools {
|
|
|
2984
2939
|
`);
|
|
2985
2940
|
}
|
|
2986
2941
|
get profile() {
|
|
2987
|
-
return
|
|
2942
|
+
return MULTIPLE_PROVIDER_PROFILE;
|
|
2988
2943
|
}
|
|
2989
2944
|
/**
|
|
2990
2945
|
* Check the configuration of all execution tools
|
|
@@ -3048,7 +3003,7 @@ class MultipleLlmExecutionTools {
|
|
|
3048
3003
|
return await llmExecutionTools.callEmbeddingModel(prompt);
|
|
3049
3004
|
// <- case [ðĪ]:
|
|
3050
3005
|
default:
|
|
3051
|
-
throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
|
|
3006
|
+
throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
|
|
3052
3007
|
}
|
|
3053
3008
|
}
|
|
3054
3009
|
catch (error) {
|
|
@@ -3069,7 +3024,7 @@ class MultipleLlmExecutionTools {
|
|
|
3069
3024
|
// 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
|
|
3070
3025
|
// 3) ...
|
|
3071
3026
|
spaceTrim((block) => `
|
|
3072
|
-
All execution tools failed:
|
|
3027
|
+
All execution tools of ${this.title} failed:
|
|
3073
3028
|
|
|
3074
3029
|
${block(errors
|
|
3075
3030
|
.map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
|
|
@@ -3078,11 +3033,11 @@ class MultipleLlmExecutionTools {
|
|
|
3078
3033
|
`));
|
|
3079
3034
|
}
|
|
3080
3035
|
else if (this.llmExecutionTools.length === 0) {
|
|
3081
|
-
throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools
|
|
3036
|
+
throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
|
|
3082
3037
|
}
|
|
3083
3038
|
else {
|
|
3084
3039
|
throw new PipelineExecutionError(spaceTrim((block) => `
|
|
3085
|
-
You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
|
|
3040
|
+
You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
|
|
3086
3041
|
|
|
3087
3042
|
Available \`LlmExecutionTools\`:
|
|
3088
3043
|
${block(this.description)}
|
|
@@ -3112,7 +3067,7 @@ class MultipleLlmExecutionTools {
|
|
|
3112
3067
|
*
|
|
3113
3068
|
* @public exported from `@promptbook/core`
|
|
3114
3069
|
*/
|
|
3115
|
-
function joinLlmExecutionTools(...llmExecutionTools) {
|
|
3070
|
+
function joinLlmExecutionTools(title, ...llmExecutionTools) {
|
|
3116
3071
|
if (llmExecutionTools.length === 0) {
|
|
3117
3072
|
const warningMessage = spaceTrim(`
|
|
3118
3073
|
You have not provided any \`LlmExecutionTools\`
|
|
@@ -3144,30 +3099,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
|
|
|
3144
3099
|
};
|
|
3145
3100
|
*/
|
|
3146
3101
|
}
|
|
3147
|
-
return new MultipleLlmExecutionTools(...llmExecutionTools);
|
|
3102
|
+
return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
|
|
3148
3103
|
}
|
|
3149
3104
|
/**
|
|
3150
3105
|
* TODO: [ð·ââïļ] @@@ Manual about construction of llmTools
|
|
3151
3106
|
*/
|
|
3152
3107
|
|
|
3153
3108
|
/**
|
|
3154
|
-
*
|
|
3109
|
+
* Just returns the given `LlmExecutionTools` or joins multiple into one
|
|
3155
3110
|
*
|
|
3156
|
-
*
|
|
3157
|
-
* 2) Undefined returns empty array
|
|
3158
|
-
* 3) Array returns itself
|
|
3159
|
-
*
|
|
3160
|
-
* @private internal utility
|
|
3111
|
+
* @public exported from `@promptbook/core`
|
|
3161
3112
|
*/
|
|
3162
|
-
function
|
|
3163
|
-
|
|
3164
|
-
|
|
3165
|
-
|
|
3166
|
-
|
|
3167
|
-
|
|
3168
|
-
}
|
|
3169
|
-
return [input];
|
|
3113
|
+
function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
|
|
3114
|
+
const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
|
|
3115
|
+
const llmTools = _llms.length === 1
|
|
3116
|
+
? _llms[0]
|
|
3117
|
+
: joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
|
|
3118
|
+
return llmTools;
|
|
3170
3119
|
}
|
|
3120
|
+
/**
|
|
3121
|
+
* TODO: [ð·ââïļ] @@@ Manual about construction of llmTools
|
|
3122
|
+
*/
|
|
3171
3123
|
|
|
3172
3124
|
/**
|
|
3173
3125
|
* Prepares the persona for the pipeline
|
|
@@ -3186,8 +3138,7 @@ async function preparePersona(personaDescription, tools, options) {
|
|
|
3186
3138
|
pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
|
|
3187
3139
|
tools,
|
|
3188
3140
|
});
|
|
3189
|
-
const
|
|
3190
|
-
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
|
3141
|
+
const llmTools = getSingleLlmExecutionTools(tools.llm);
|
|
3191
3142
|
const availableModels = (await llmTools.listModels())
|
|
3192
3143
|
.filter(({ modelVariant }) => modelVariant === 'CHAT')
|
|
3193
3144
|
.map(({ modelName, modelDescription }) => ({
|
|
@@ -3231,6 +3182,7 @@ async function preparePersona(personaDescription, tools, options) {
|
|
|
3231
3182
|
};
|
|
3232
3183
|
}
|
|
3233
3184
|
/**
|
|
3185
|
+
* TODO: [ðĐ] DRY `preparePersona` and `selectBestModelFromAvailable`
|
|
3234
3186
|
* TODO: [ð][main] If the persona was prepared with different version or different set of models, prepare it once again
|
|
3235
3187
|
* TODO: [ðĒ] Check validity of `modelName` in pipeline
|
|
3236
3188
|
* TODO: [ðĒ] Check validity of `systemMessage` in pipeline
|
|
@@ -4349,9 +4301,7 @@ async function preparePipeline(pipeline, tools, options) {
|
|
|
4349
4301
|
if (tools === undefined || tools.llm === undefined) {
|
|
4350
4302
|
throw new MissingToolsError('LLM tools are required for preparing the pipeline');
|
|
4351
4303
|
}
|
|
4352
|
-
|
|
4353
|
-
const _llms = arrayableToArray(tools.llm);
|
|
4354
|
-
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
|
4304
|
+
const llmTools = getSingleLlmExecutionTools(tools.llm);
|
|
4355
4305
|
const llmToolsWithUsage = countUsage(llmTools);
|
|
4356
4306
|
// <- TODO: [ðŊ]
|
|
4357
4307
|
/*
|
|
@@ -5511,9 +5461,7 @@ async function executeAttempts(options) {
|
|
|
5511
5461
|
$scriptPipelineExecutionErrors: [],
|
|
5512
5462
|
$failedResults: [], // Track all failed attempts
|
|
5513
5463
|
};
|
|
5514
|
-
|
|
5515
|
-
const _llms = arrayableToArray(tools.llm);
|
|
5516
|
-
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
|
5464
|
+
const llmTools = getSingleLlmExecutionTools(tools.llm);
|
|
5517
5465
|
attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
|
|
5518
5466
|
const isJokerAttempt = attemptIndex < 0;
|
|
5519
5467
|
const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
|
|
@@ -6033,9 +5981,7 @@ async function getKnowledgeForTask(options) {
|
|
|
6033
5981
|
return ''; // <- Note: Np knowledge present, return empty string
|
|
6034
5982
|
}
|
|
6035
5983
|
try {
|
|
6036
|
-
|
|
6037
|
-
const _llms = arrayableToArray(tools.llm);
|
|
6038
|
-
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
|
5984
|
+
const llmTools = getSingleLlmExecutionTools(tools.llm);
|
|
6039
5985
|
const taskEmbeddingPrompt = {
|
|
6040
5986
|
title: 'Knowledge Search',
|
|
6041
5987
|
modelRequirements: {
|
|
@@ -6636,13 +6582,13 @@ function createPipelineExecutor(options) {
|
|
|
6636
6582
|
// Calculate and update tldr based on pipeline progress
|
|
6637
6583
|
const cv = newOngoingResult;
|
|
6638
6584
|
// Calculate progress based on parameters resolved vs total parameters
|
|
6639
|
-
const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
|
|
6585
|
+
const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
|
|
6640
6586
|
let resolvedParameters = 0;
|
|
6641
6587
|
let currentTaskTitle = '';
|
|
6642
6588
|
// Get the resolved parameters from output parameters
|
|
6643
6589
|
if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
|
|
6644
6590
|
// Count how many output parameters have non-empty values
|
|
6645
|
-
resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
|
|
6591
|
+
resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
|
|
6646
6592
|
}
|
|
6647
6593
|
// Try to determine current task from execution report
|
|
6648
6594
|
if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
|
|
@@ -6898,7 +6844,7 @@ function $registeredLlmToolsMessage() {
|
|
|
6898
6844
|
* @public exported from `@promptbook/core`
|
|
6899
6845
|
*/
|
|
6900
6846
|
function createLlmToolsFromConfiguration(configuration, options = {}) {
|
|
6901
|
-
const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
|
|
6847
|
+
const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
|
|
6902
6848
|
const llmTools = configuration.map((llmConfiguration) => {
|
|
6903
6849
|
const registeredItem = $llmToolsRegister
|
|
6904
6850
|
.list()
|
|
@@ -6930,7 +6876,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
|
|
|
6930
6876
|
...llmConfiguration.options,
|
|
6931
6877
|
});
|
|
6932
6878
|
});
|
|
6933
|
-
return joinLlmExecutionTools(...llmTools);
|
|
6879
|
+
return joinLlmExecutionTools(title, ...llmTools);
|
|
6934
6880
|
}
|
|
6935
6881
|
/**
|
|
6936
6882
|
* TODO: [ð] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
|
|
@@ -8179,8 +8125,11 @@ function startRemoteServer(options) {
|
|
|
8179
8125
|
if (isAnonymous === true) {
|
|
8180
8126
|
// Note: Anonymous mode
|
|
8181
8127
|
// TODO: Maybe check that configuration is not empty
|
|
8182
|
-
const { llmToolsConfiguration } = identification;
|
|
8183
|
-
llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
|
|
8128
|
+
const { userId, llmToolsConfiguration } = identification;
|
|
8129
|
+
llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
|
|
8130
|
+
title: `LLM Tools for anonymous user "${userId}" on server`,
|
|
8131
|
+
isVerbose,
|
|
8132
|
+
});
|
|
8184
8133
|
}
|
|
8185
8134
|
else if (isAnonymous === false && createLlmExecutionTools !== null) {
|
|
8186
8135
|
// Note: Application mode
|
|
@@ -8214,7 +8163,7 @@ function startRemoteServer(options) {
|
|
|
8214
8163
|
});
|
|
8215
8164
|
// Note: OpenAI-compatible chat completions endpoint
|
|
8216
8165
|
app.post('/v1/chat/completions', async (request, response) => {
|
|
8217
|
-
// TODO:
|
|
8166
|
+
// TODO: [ð§ ][ðĶĒ] Make OpenAI compatible more promptbook-native - make reverse adapter from LlmExecutionTools to OpenAI-compatible:
|
|
8218
8167
|
try {
|
|
8219
8168
|
const params = request.body;
|
|
8220
8169
|
const { model, messages } = params;
|