@promptbook/remote-server 0.101.0-2 → 0.101.0-20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +45 -0
- package/esm/index.es.js +53 -42
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/components.index.d.ts +20 -0
- package/esm/typings/src/_packages/core.index.d.ts +14 -0
- package/esm/typings/src/_packages/types.index.d.ts +14 -0
- package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +41 -3
- package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
- package/esm/typings/src/book-2.0/agent-source/parseParameters.d.ts +13 -0
- package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +8 -2
- package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +59 -0
- package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +8 -2
- package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +45 -0
- package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +1 -1
- package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +46 -0
- package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +47 -0
- package/esm/typings/src/book-2.0/commitments/META/META.d.ts +62 -0
- package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +31 -4
- package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +20 -2
- package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +8 -2
- package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +46 -0
- package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +8 -2
- package/esm/typings/src/book-2.0/commitments/index.d.ts +7 -3
- package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +2 -2
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +63 -0
- package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/index.d.ts +3 -0
- package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +15 -0
- package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +4 -0
- package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +26 -0
- package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
- package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
- package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
- package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
- package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
- package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
- package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
- package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
- package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
- package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
- package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
- package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
- package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
- package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +54 -0
- package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
- package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
- package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
- package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
- package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +15 -8
- package/esm/typings/src/personas/preparePersona.d.ts +1 -0
- package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
- package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
- package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
- package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
- package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
- package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +1 -0
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +53 -42
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
- package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
- package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
- package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
- package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
- package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
- /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/typings/src/utils/markdown/humanizeAiText.d.ts
CHANGED
@@ -9,5 +9,6 @@ import { string_markdown } from '../../types/typeAliases';
  */
 export declare function humanizeAiText(aiText: string_markdown): string_markdown;
 /**
+ * TODO: [🧠] Maybe this should be exported from `@promptbook/utils` not `@promptbook/markdown-utils`
  * TODO: [🅾️] !!! Use this across the project where AI text is involved
  */
package/esm/typings/src/version.d.ts
CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.101.0-
+ * It follows semantic versioning (e.g., `0.101.0-19`).
  *
  * @generated
  */
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@promptbook/remote-server",
-  "version": "0.101.0-2",
+  "version": "0.101.0-20",
   "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
   "private": false,
   "sideEffects": false,
@@ -95,7 +95,7 @@
   "module": "./esm/index.es.js",
   "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
   "peerDependencies": {
-    "@promptbook/core": "0.101.0-2"
+    "@promptbook/core": "0.101.0-20"
   },
   "dependencies": {
     "colors": "1.4.0",
package/umd/index.umd.js
CHANGED
@@ -47,7 +47,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.101.0-2';
+const PROMPTBOOK_ENGINE_VERSION = '0.101.0-20';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2893,6 +2893,25 @@
  * TODO: [👷♂️] @@@ Manual about construction of llmTools
  */
 
+/**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+function arrayableToArray(input) {
+    if (input === undefined) {
+        return [];
+    }
+    if (input instanceof Array) {
+        return input;
+    }
+    return [input];
+}
+
 /**
  * Predefined profiles for LLM providers to maintain consistency across the application
  * These profiles represent each provider as a virtual persona in chat interfaces
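Note: `arrayableToArray` is marked `@private internal utility`, so it is not part of the public API; the sketch below only illustrates the three cases documented in its comment (the `singleTool`, `toolA`, `toolB` names are hypothetical placeholders):

    arrayableToArray(undefined);      // -> []
    arrayableToArray(singleTool);     // -> [singleTool]
    arrayableToArray([toolA, toolB]); // -> [toolA, toolB]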
@@ -2973,12 +2992,10 @@
     /**
      * Gets array of execution tools in order of priority
      */
-    constructor(...llmExecutionTools) {
+    constructor(title, ...llmExecutionTools) {
+        this.title = title;
         this.llmExecutionTools = llmExecutionTools;
     }
-    get title() {
-        return 'Multiple LLM Providers';
-    }
     get description() {
         const innerModelsTitlesAndDescriptions = this.llmExecutionTools
             .map(({ title, description }, index) => {
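Note: `MultipleLlmExecutionTools` no longer returns the fixed string 'Multiple LLM Providers' from a getter; the title is now the first constructor argument, followed by the tools in priority order, and is stored on the instance. A minimal sketch of the new call shape, assuming `openAiTools` and `anthropicTools` are existing `LlmExecutionTools` instances (hypothetical names):

    const tools = new MultipleLlmExecutionTools('My joined providers', openAiTools, anthropicTools);
    console.log(tools.title); // -> 'My joined providers'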
@@ -3064,7 +3081,7 @@
                         return await llmExecutionTools.callEmbeddingModel(prompt);
                     // <- case [🤖]:
                     default:
-                        throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                        throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
                 }
             }
             catch (error) {
@@ -3085,7 +3102,7 @@
                 // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
                 // 3) ...
                 spaceTrim__default["default"]((block) => `
-                    All execution tools failed:
+                    All execution tools of ${this.title} failed:
 
                     ${block(errors
                         .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3094,11 +3111,11 @@
                 `));
             }
             else if (this.llmExecutionTools.length === 0) {
-                throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools
+                throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
             }
             else {
                 throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
-                    You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+                    You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
                     Available \`LlmExecutionTools\`:
                     ${block(this.description)}
@@ -3128,7 +3145,7 @@
  *
  * @public exported from `@promptbook/core`
  */
-function joinLlmExecutionTools(...llmExecutionTools) {
+function joinLlmExecutionTools(title, ...llmExecutionTools) {
     if (llmExecutionTools.length === 0) {
         const warningMessage = spaceTrim__default["default"](`
             You have not provided any \`LlmExecutionTools\`
@@ -3160,30 +3177,27 @@
         };
         */
     }
-    return new MultipleLlmExecutionTools(...llmExecutionTools);
+    return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
 }
 /**
  * TODO: [👷♂️] @@@ Manual about construction of llmTools
  */
 
 /**
- * Takes an item or an array of items and returns an array of items
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @private internal utility
+ * @public exported from `@promptbook/core`
  */
-function arrayableToArray(input) {
-    if (input === undefined) {
-        return [];
-    }
-    if (input instanceof Array) {
-        return input;
-    }
-    return [input];
+function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+    const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+    const llmTools = _llms.length === 1
+        ? _llms[0]
+        : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+    return llmTools;
 }
+/**
+ * TODO: [👷♂️] @@@ Manual about construction of llmTools
+ */
 
 /**
  * Prepares the persona for the pipeline
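Note: `joinLlmExecutionTools` now takes the title as its first argument, and the new `getSingleLlmExecutionTools` accepts either one `LlmExecutionTools` object or an array and returns a single tools object, joining only when more than one is given. A minimal usage sketch, assuming the `@public exported from @promptbook/core` annotations above mean both are importable from `@promptbook/core`, and that `openAiTools` and `anthropicTools` are existing `LlmExecutionTools` instances (hypothetical names):

    import { joinLlmExecutionTools, getSingleLlmExecutionTools } from '@promptbook/core';

    const joined = joinLlmExecutionTools('Playground providers', openAiTools, anthropicTools);
    const single = getSingleLlmExecutionTools(openAiTools);                   // returned as-is
    const merged = getSingleLlmExecutionTools([openAiTools, anthropicTools]); // joined into one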
@@ -3202,8 +3216,7 @@
         pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
         tools,
     });
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const availableModels = (await llmTools.listModels())
         .filter(({ modelVariant }) => modelVariant === 'CHAT')
         .map(({ modelName, modelDescription }) => ({
|
|
|
3247
3260
|
};
|
|
3248
3261
|
}
|
|
3249
3262
|
/**
|
|
3263
|
+
* TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
|
|
3250
3264
|
* TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
|
|
3251
3265
|
* TODO: [🏢] Check validity of `modelName` in pipeline
|
|
3252
3266
|
* TODO: [🏢] Check validity of `systemMessage` in pipeline
|
|
@@ -4365,9 +4379,7 @@
     if (tools === undefined || tools.llm === undefined) {
         throw new MissingToolsError('LLM tools are required for preparing the pipeline');
     }
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     const llmToolsWithUsage = countUsage(llmTools);
     // <- TODO: [🌯]
     /*
@@ -5527,9 +5539,7 @@
         $scriptPipelineExecutionErrors: [],
         $failedResults: [], // Track all failed attempts
     };
-
-    const _llms = arrayableToArray(tools.llm);
-    const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+    const llmTools = getSingleLlmExecutionTools(tools.llm);
     attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
         const isJokerAttempt = attemptIndex < 0;
         const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -6049,9 +6059,7 @@
         return ''; // <- Note: Np knowledge present, return empty string
     }
     try {
-
-        const _llms = arrayableToArray(tools.llm);
-        const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+        const llmTools = getSingleLlmExecutionTools(tools.llm);
         const taskEmbeddingPrompt = {
             title: 'Knowledge Search',
             modelRequirements: {
|
|
|
6652
6660
|
// Calculate and update tldr based on pipeline progress
|
|
6653
6661
|
const cv = newOngoingResult;
|
|
6654
6662
|
// Calculate progress based on parameters resolved vs total parameters
|
|
6655
|
-
const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
|
|
6663
|
+
const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
|
|
6656
6664
|
let resolvedParameters = 0;
|
|
6657
6665
|
let currentTaskTitle = '';
|
|
6658
6666
|
// Get the resolved parameters from output parameters
|
|
6659
6667
|
if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
|
|
6660
6668
|
// Count how many output parameters have non-empty values
|
|
6661
|
-
resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
|
|
6669
|
+
resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
|
|
6662
6670
|
}
|
|
6663
6671
|
// Try to determine current task from execution report
|
|
6664
6672
|
if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
|
|
@@ -6914,7 +6922,7 @@
  * @public exported from `@promptbook/core`
  */
 function createLlmToolsFromConfiguration(configuration, options = {}) {
-    const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
+    const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
     const llmTools = configuration.map((llmConfiguration) => {
         const registeredItem = $llmToolsRegister
             .list()
|
|
|
6946
6954
|
...llmConfiguration.options,
|
|
6947
6955
|
});
|
|
6948
6956
|
});
|
|
6949
|
-
return joinLlmExecutionTools(...llmTools);
|
|
6957
|
+
return joinLlmExecutionTools(title, ...llmTools);
|
|
6950
6958
|
}
|
|
6951
6959
|
/**
|
|
6952
6960
|
* TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
|
|
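Note: `createLlmToolsFromConfiguration` now accepts an optional `title` in its options (defaulting to 'LLM Tools from Configuration') and forwards it to `joinLlmExecutionTools`, so the joined tools carry a meaningful name in error messages. A minimal sketch, assuming `llmToolsConfiguration` is an existing configuration array (hypothetical value):

    const llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
        title: 'LLM Tools for my app',
        isVerbose: true,
    });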
@@ -8195,8 +8203,11 @@
     if (isAnonymous === true) {
         // Note: Anonymous mode
         // TODO: Maybe check that configuration is not empty
-        const { llmToolsConfiguration } = identification;
-        llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
+        const { userId, llmToolsConfiguration } = identification;
+        llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
+            title: `LLM Tools for anonymous user "${userId}" on server`,
+            isVerbose,
+        });
     }
     else if (isAnonymous === false && createLlmExecutionTools !== null) {
         // Note: Application mode