@promptbook/remote-server 0.101.0-18 โ†’ 0.101.0-19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. package/esm/index.es.js +50 -40
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  4. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  5. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
  6. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  7. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  8. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  9. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  10. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  11. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  12. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  13. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  15. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  16. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  17. package/esm/typings/src/version.d.ts +1 -1
  18. package/package.json +2 -2
  19. package/umd/index.umd.js +50 -40
  20. package/umd/index.umd.js.map +1 -1
  21. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  22. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  23. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  24. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  25. /package/esm/typings/src/{cli/test/ptbk.test.d.ts โ†’ llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
@@ -120,6 +120,7 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/regist
120
120
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
121
121
  import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
122
122
  import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
123
+ import { getSingleLlmExecutionTools } from '../llm-providers/_multiple/getSingleLlmExecutionTools';
123
124
  import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
124
125
  import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
125
126
  import { AgentLlmExecutionTools } from '../llm-providers/agent/AgentLlmExecutionTools';
@@ -291,6 +292,7 @@ export { createLlmToolsFromConfiguration };
291
292
  export { cacheLlmTools };
292
293
  export { countUsage };
293
294
  export { limitTotalUsage };
295
+ export { getSingleLlmExecutionTools };
294
296
  export { joinLlmExecutionTools };
295
297
  export { MultipleLlmExecutionTools };
296
298
  export { AgentLlmExecutionTools };
@@ -8,3 +8,6 @@ import { string_agent_name, string_url_image } from '../../types/typeAliases';
8
8
  * @public exported from `@promptbook/core`
9
9
  */
10
10
  export declare function generatePlaceholderAgentProfileImageUrl(agentName?: string_agent_name): string_url_image;
11
+ /**
12
+ * TODO: [๐Ÿคน] Figure out best placeholder image generator https://i.pravatar.cc/1000?u=568
13
+ */
@@ -31,7 +31,13 @@ export type MockedChatDelayConfig = {
31
31
  *
32
32
  * @public exported from `@promptbook/components`
33
33
  */
34
- export type MockedChatProps = ChatProps & {
34
+ export type MockedChatProps = Omit<ChatProps, 'onReset' | /*'onMessage' | */ 'onUseTemplate' | 'isVoiceRecognitionButtonShown'> & {
35
+ /**
36
+ * Whether to show the reset button
37
+ *
38
+ * @default false
39
+ */
40
+ isResetShown?: boolean;
35
41
  /**
36
42
  * Optional delays configuration for emulating typing behavior
37
43
  */
@@ -1,8 +1,7 @@
1
1
  import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
2
2
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
3
3
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
4
- import type { Parameters } from '../../types/typeAliases';
5
- import type { string_parameter_name } from '../../types/typeAliases';
4
+ import type { Parameters, string_parameter_name } from '../../types/typeAliases';
6
5
  import type { TODO_string } from '../../utils/organization/TODO_string';
7
6
  import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
8
7
  import type { PipelineExecutorResult } from '../PipelineExecutorResult';
@@ -1,9 +1,7 @@
1
1
  import type { ReadonlyDeep } from 'type-fest';
2
2
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
3
3
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
4
- import type { Parameters } from '../../types/typeAliases';
5
- import type { string_markdown } from '../../types/typeAliases';
6
- import type { string_parameter_value } from '../../types/typeAliases';
4
+ import type { Parameters, string_markdown, string_parameter_value } from '../../types/typeAliases';
7
5
  import type { ExecutionTools } from '../ExecutionTools';
8
6
  /**
9
7
  * Options for retrieving relevant knowledge for a specific task during pipeline execution.
@@ -1,7 +1,6 @@
1
1
  import { Promisable } from 'type-fest';
2
2
  import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
3
- import type { string_app_id } from '../../../types/typeAliases';
4
- import type { string_url } from '../../../types/typeAliases';
3
+ import type { string_app_id, string_url } from '../../../types/typeAliases';
5
4
  import type { really_any } from '../../../utils/organization/really_any';
6
5
  import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
7
6
  import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
@@ -1,4 +1,4 @@
1
- import type { string_user_id } from '../../../types/typeAliases';
1
+ import type { string_markdown_text, string_mime_type_with_wildcard, string_user_id } from '../../../types/typeAliases';
2
2
  import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
3
3
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
4
4
  /**
@@ -7,12 +7,18 @@ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
7
7
  * @private internal type for `$provideLlmToolsFromEnv` and `$provideLlmToolsForTestingAndScriptsAndPlayground`
8
8
  */
9
9
  export type CreateLlmToolsFromConfigurationOptions = {
10
+ /**
11
+ * Title of the LLM tools
12
+ *
13
+ * @default 'LLM Tools from Configuration'
14
+ */
15
+ readonly title?: string_mime_type_with_wildcard & string_markdown_text;
10
16
  /**
11
17
  * This will will be passed to the created `LlmExecutionTools`
12
18
  *
13
19
  * @default false
14
20
  */
15
- isVerbose?: boolean;
21
+ readonly isVerbose?: boolean;
16
22
  /**
17
23
  * Identifier of the end user
18
24
  *
@@ -1,16 +1,8 @@
1
1
  import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
- import type { ChatPromptResult } from '../../execution/PromptResult';
4
- import type { CompletionPromptResult } from '../../execution/PromptResult';
5
- import type { EmbeddingPromptResult } from '../../execution/PromptResult';
6
- import type { PromptResult } from '../../execution/PromptResult';
7
- import type { ChatPrompt } from '../../types/Prompt';
8
- import type { CompletionPrompt } from '../../types/Prompt';
9
- import type { EmbeddingPrompt } from '../../types/Prompt';
10
- import type { Prompt } from '../../types/Prompt';
11
- import type { string_markdown } from '../../types/typeAliases';
12
- import type { string_markdown_text } from '../../types/typeAliases';
13
- import type { string_title } from '../../types/typeAliases';
3
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
4
+ import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
5
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
14
6
  /**
15
7
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
16
8
  *
@@ -18,6 +10,7 @@ import type { string_title } from '../../types/typeAliases';
18
10
  * @public exported from `@promptbook/core`
19
11
  */
20
12
  export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
13
+ readonly title: string_title & string_markdown_text;
21
14
  /**
22
15
  * Array of execution tools in order of priority
23
16
  */
@@ -25,8 +18,7 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
25
18
  /**
26
19
  * Gets array of execution tools in order of priority
27
20
  */
28
- constructor(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
29
- get title(): string_title & string_markdown_text;
21
+ constructor(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
30
22
  get description(): string_markdown;
31
23
  get profile(): {
32
24
  name: string;
@@ -0,0 +1,11 @@
1
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
+ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
3
+ /**
4
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
5
+ *
6
+ * @public exported from `@promptbook/core`
7
+ */
8
+ export declare function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools: undefined | LlmExecutionTools | ReadonlyArray<LlmExecutionTools>): LlmExecutionTools | MultipleLlmExecutionTools;
9
+ /**
10
+ * TODO: [๐Ÿ‘ทโ€โ™‚๏ธ] @@@ Manual about construction of llmTools
11
+ */
@@ -1,4 +1,5 @@
1
1
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
+ import { string_markdown_text, string_title } from '../../types/typeAliases';
2
3
  import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
3
4
  /**
4
5
  * Joins multiple LLM Execution Tools into one
@@ -15,7 +16,7 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
15
16
  *
16
17
  * @public exported from `@promptbook/core`
17
18
  */
18
- export declare function joinLlmExecutionTools(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
19
+ export declare function joinLlmExecutionTools(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
19
20
  /**
20
21
  * TODO: [๐Ÿ‘ทโ€โ™‚๏ธ] @@@ Manual about construction of llmTools
21
22
  */
@@ -11,5 +11,6 @@ import type { string_postprocessing_function_name } from '../../types/typeAliase
11
11
  */
12
12
  export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>): Promise<string>;
13
13
  /**
14
+ * TODO: Do not use LoremIpsum, but use some faked text that looks more human-promptbook-like
14
15
  * TODO: [๐Ÿ’] Unite object for expecting amount and format - use here also a format
15
16
  */
@@ -1,12 +1,9 @@
1
1
  import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult } from '../../execution/PromptResult';
5
- import type { CompletionPromptResult } from '../../execution/PromptResult';
4
+ import type { ChatPromptResult, CompletionPromptResult } from '../../execution/PromptResult';
6
5
  import type { Prompt } from '../../types/Prompt';
7
- import type { string_markdown } from '../../types/typeAliases';
8
- import type { string_markdown_text } from '../../types/typeAliases';
9
- import type { string_title } from '../../types/typeAliases';
6
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
10
7
  /**
11
8
  * Mocked execution Tools for just echoing the requests for testing purposes.
12
9
  *
@@ -1,13 +1,9 @@
1
1
  import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult } from '../../execution/PromptResult';
5
- import type { CompletionPromptResult } from '../../execution/PromptResult';
6
- import type { EmbeddingPromptResult } from '../../execution/PromptResult';
4
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
7
5
  import type { Prompt } from '../../types/Prompt';
8
- import type { string_markdown } from '../../types/typeAliases';
9
- import type { string_markdown_text } from '../../types/typeAliases';
10
- import type { string_title } from '../../types/typeAliases';
6
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
11
7
  /**
12
8
  * Mocked execution Tools for just faking expected responses for testing purposes
13
9
  *
@@ -1,6 +1,5 @@
1
1
  import type { KnowledgePiecePreparedJson } from '../../pipeline/PipelineJson/KnowledgePieceJson';
2
- import type { Scraper } from '../_common/Scraper';
3
- import type { ScraperSourceHandler } from '../_common/Scraper';
2
+ import type { Scraper, ScraperSourceHandler } from '../_common/Scraper';
4
3
  import type { ExecutionTools } from '../../execution/ExecutionTools';
5
4
  import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
6
5
  import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.101.0-17`).
18
+ * It follows semantic versioning (e.g., `0.101.0-18`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/remote-server",
3
- "version": "0.101.0-18",
3
+ "version": "0.101.0-19",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -95,7 +95,7 @@
95
95
  "module": "./esm/index.es.js",
96
96
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
97
97
  "peerDependencies": {
98
- "@promptbook/core": "0.101.0-18"
98
+ "@promptbook/core": "0.101.0-19"
99
99
  },
100
100
  "dependencies": {
101
101
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -47,7 +47,7 @@
47
47
  * @generated
48
48
  * @see https://github.com/webgptorg/promptbook
49
49
  */
50
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-18';
50
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
51
51
  /**
52
52
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
53
53
  * Note: [๐Ÿ’ž] Ignore a discrepancy between file name and entity name
@@ -2893,6 +2893,25 @@
2893
2893
  * TODO: [๐Ÿ‘ทโ€โ™‚๏ธ] @@@ Manual about construction of llmTools
2894
2894
  */
2895
2895
 
2896
+ /**
2897
+ * Takes an item or an array of items and returns an array of items
2898
+ *
2899
+ * 1) Any item except array and undefined returns array with that one item (also null)
2900
+ * 2) Undefined returns empty array
2901
+ * 3) Array returns itself
2902
+ *
2903
+ * @private internal utility
2904
+ */
2905
+ function arrayableToArray(input) {
2906
+ if (input === undefined) {
2907
+ return [];
2908
+ }
2909
+ if (input instanceof Array) {
2910
+ return input;
2911
+ }
2912
+ return [input];
2913
+ }
2914
+
2896
2915
  /**
2897
2916
  * Predefined profiles for LLM providers to maintain consistency across the application
2898
2917
  * These profiles represent each provider as a virtual persona in chat interfaces
@@ -2973,12 +2992,10 @@
2973
2992
  /**
2974
2993
  * Gets array of execution tools in order of priority
2975
2994
  */
2976
- constructor(...llmExecutionTools) {
2995
+ constructor(title, ...llmExecutionTools) {
2996
+ this.title = title;
2977
2997
  this.llmExecutionTools = llmExecutionTools;
2978
2998
  }
2979
- get title() {
2980
- return 'Multiple LLM Providers';
2981
- }
2982
2999
  get description() {
2983
3000
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
2984
3001
  .map(({ title, description }, index) => {
@@ -3064,7 +3081,7 @@
3064
3081
  return await llmExecutionTools.callEmbeddingModel(prompt);
3065
3082
  // <- case [๐Ÿค–]:
3066
3083
  default:
3067
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
3084
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
3068
3085
  }
3069
3086
  }
3070
3087
  catch (error) {
@@ -3085,7 +3102,7 @@
3085
3102
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
3086
3103
  // 3) ...
3087
3104
  spaceTrim__default["default"]((block) => `
3088
- All execution tools failed:
3105
+ All execution tools of ${this.title} failed:
3089
3106
 
3090
3107
  ${block(errors
3091
3108
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3094,11 +3111,11 @@
3094
3111
  `));
3095
3112
  }
3096
3113
  else if (this.llmExecutionTools.length === 0) {
3097
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
3114
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
3098
3115
  }
3099
3116
  else {
3100
3117
  throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
3101
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
3118
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
3102
3119
 
3103
3120
  Available \`LlmExecutionTools\`:
3104
3121
  ${block(this.description)}
@@ -3128,7 +3145,7 @@
3128
3145
  *
3129
3146
  * @public exported from `@promptbook/core`
3130
3147
  */
3131
- function joinLlmExecutionTools(...llmExecutionTools) {
3148
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
3132
3149
  if (llmExecutionTools.length === 0) {
3133
3150
  const warningMessage = spaceTrim__default["default"](`
3134
3151
  You have not provided any \`LlmExecutionTools\`
@@ -3160,30 +3177,27 @@
3160
3177
  };
3161
3178
  */
3162
3179
  }
3163
- return new MultipleLlmExecutionTools(...llmExecutionTools);
3180
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
3164
3181
  }
3165
3182
  /**
3166
3183
  * TODO: [๐Ÿ‘ทโ€โ™‚๏ธ] @@@ Manual about construction of llmTools
3167
3184
  */
3168
3185
 
3169
3186
  /**
3170
- * Takes an item or an array of items and returns an array of items
3171
- *
3172
- * 1) Any item except array and undefined returns array with that one item (also null)
3173
- * 2) Undefined returns empty array
3174
- * 3) Array returns itself
3187
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
3175
3188
  *
3176
- * @private internal utility
3189
+ * @public exported from `@promptbook/core`
3177
3190
  */
3178
- function arrayableToArray(input) {
3179
- if (input === undefined) {
3180
- return [];
3181
- }
3182
- if (input instanceof Array) {
3183
- return input;
3184
- }
3185
- return [input];
3191
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
3192
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
3193
+ const llmTools = _llms.length === 1
3194
+ ? _llms[0]
3195
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
3196
+ return llmTools;
3186
3197
  }
3198
+ /**
3199
+ * TODO: [๐Ÿ‘ทโ€โ™‚๏ธ] @@@ Manual about construction of llmTools
3200
+ */
3187
3201
 
3188
3202
  /**
3189
3203
  * Prepares the persona for the pipeline
@@ -3202,8 +3216,7 @@
3202
3216
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
3203
3217
  tools,
3204
3218
  });
3205
- const _llms = arrayableToArray(tools.llm);
3206
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
3219
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
3207
3220
  const availableModels = (await llmTools.listModels())
3208
3221
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
3209
3222
  .map(({ modelName, modelDescription }) => ({
@@ -4366,9 +4379,7 @@
4366
4379
  if (tools === undefined || tools.llm === undefined) {
4367
4380
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
4368
4381
  }
4369
- // TODO: [๐Ÿš] Make arrayable LLMs -> single LLM DRY
4370
- const _llms = arrayableToArray(tools.llm);
4371
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4382
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4372
4383
  const llmToolsWithUsage = countUsage(llmTools);
4373
4384
  // <- TODO: [๐ŸŒฏ]
4374
4385
  /*
@@ -5528,9 +5539,7 @@
5528
5539
  $scriptPipelineExecutionErrors: [],
5529
5540
  $failedResults: [], // Track all failed attempts
5530
5541
  };
5531
- // TODO: [๐Ÿš] Make arrayable LLMs -> single LLM DRY
5532
- const _llms = arrayableToArray(tools.llm);
5533
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5542
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5534
5543
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
5535
5544
  const isJokerAttempt = attemptIndex < 0;
5536
5545
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -6050,9 +6059,7 @@
6050
6059
  return ''; // <- Note: Np knowledge present, return empty string
6051
6060
  }
6052
6061
  try {
6053
- // TODO: [๐Ÿš] Make arrayable LLMs -> single LLM DRY
6054
- const _llms = arrayableToArray(tools.llm);
6055
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6062
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
6056
6063
  const taskEmbeddingPrompt = {
6057
6064
  title: 'Knowledge Search',
6058
6065
  modelRequirements: {
@@ -6915,7 +6922,7 @@
6915
6922
  * @public exported from `@promptbook/core`
6916
6923
  */
6917
6924
  function createLlmToolsFromConfiguration(configuration, options = {}) {
6918
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
6925
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
6919
6926
  const llmTools = configuration.map((llmConfiguration) => {
6920
6927
  const registeredItem = $llmToolsRegister
6921
6928
  .list()
@@ -6947,7 +6954,7 @@
6947
6954
  ...llmConfiguration.options,
6948
6955
  });
6949
6956
  });
6950
- return joinLlmExecutionTools(...llmTools);
6957
+ return joinLlmExecutionTools(title, ...llmTools);
6951
6958
  }
6952
6959
  /**
6953
6960
  * TODO: [๐ŸŽŒ] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -8196,8 +8203,11 @@
8196
8203
  if (isAnonymous === true) {
8197
8204
  // Note: Anonymous mode
8198
8205
  // TODO: Maybe check that configuration is not empty
8199
- const { llmToolsConfiguration } = identification;
8200
- llm = createLlmToolsFromConfiguration(llmToolsConfiguration, { isVerbose });
8206
+ const { userId, llmToolsConfiguration } = identification;
8207
+ llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
8208
+ title: `LLM Tools for anonymous user "${userId}" on server`,
8209
+ isVerbose,
8210
+ });
8201
8211
  }
8202
8212
  else if (isAnonymous === false && createLlmExecutionTools !== null) {
8203
8213
  // Note: Application mode