@promptbook/wizard 0.101.0-18 → 0.101.0-19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25) hide show
  1. package/esm/index.es.js +50 -43
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  4. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  5. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
  6. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  7. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  8. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  9. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  10. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  11. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  12. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  13. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  15. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  16. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  17. package/esm/typings/src/version.d.ts +1 -1
  18. package/package.json +2 -2
  19. package/umd/index.umd.js +50 -43
  20. package/umd/index.umd.js.map +1 -1
  21. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  22. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  23. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  24. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  25. /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
@@ -120,6 +120,7 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/regist
120
120
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
121
121
  import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
122
122
  import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
123
+ import { getSingleLlmExecutionTools } from '../llm-providers/_multiple/getSingleLlmExecutionTools';
123
124
  import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
124
125
  import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
125
126
  import { AgentLlmExecutionTools } from '../llm-providers/agent/AgentLlmExecutionTools';
@@ -291,6 +292,7 @@ export { createLlmToolsFromConfiguration };
291
292
  export { cacheLlmTools };
292
293
  export { countUsage };
293
294
  export { limitTotalUsage };
295
+ export { getSingleLlmExecutionTools };
294
296
  export { joinLlmExecutionTools };
295
297
  export { MultipleLlmExecutionTools };
296
298
  export { AgentLlmExecutionTools };
@@ -8,3 +8,6 @@ import { string_agent_name, string_url_image } from '../../types/typeAliases';
8
8
  * @public exported from `@promptbook/core`
9
9
  */
10
10
  export declare function generatePlaceholderAgentProfileImageUrl(agentName?: string_agent_name): string_url_image;
11
+ /**
12
+ * TODO: [🤹] Figure out best placeholder image generator https://i.pravatar.cc/1000?u=568
13
+ */
@@ -31,7 +31,13 @@ export type MockedChatDelayConfig = {
31
31
  *
32
32
  * @public exported from `@promptbook/components`
33
33
  */
34
- export type MockedChatProps = ChatProps & {
34
+ export type MockedChatProps = Omit<ChatProps, 'onReset' | /*'onMessage' | */ 'onUseTemplate' | 'isVoiceRecognitionButtonShown'> & {
35
+ /**
36
+ * Whether to show the reset button
37
+ *
38
+ * @default false
39
+ */
40
+ isResetShown?: boolean;
35
41
  /**
36
42
  * Optional delays configuration for emulating typing behavior
37
43
  */
@@ -1,8 +1,7 @@
1
1
  import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
2
2
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
3
3
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
4
- import type { Parameters } from '../../types/typeAliases';
5
- import type { string_parameter_name } from '../../types/typeAliases';
4
+ import type { Parameters, string_parameter_name } from '../../types/typeAliases';
6
5
  import type { TODO_string } from '../../utils/organization/TODO_string';
7
6
  import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
8
7
  import type { PipelineExecutorResult } from '../PipelineExecutorResult';
@@ -1,9 +1,7 @@
1
1
  import type { ReadonlyDeep } from 'type-fest';
2
2
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
3
3
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
4
- import type { Parameters } from '../../types/typeAliases';
5
- import type { string_markdown } from '../../types/typeAliases';
6
- import type { string_parameter_value } from '../../types/typeAliases';
4
+ import type { Parameters, string_markdown, string_parameter_value } from '../../types/typeAliases';
7
5
  import type { ExecutionTools } from '../ExecutionTools';
8
6
  /**
9
7
  * Options for retrieving relevant knowledge for a specific task during pipeline execution.
@@ -1,7 +1,6 @@
1
1
  import { Promisable } from 'type-fest';
2
2
  import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
3
- import type { string_app_id } from '../../../types/typeAliases';
4
- import type { string_url } from '../../../types/typeAliases';
3
+ import type { string_app_id, string_url } from '../../../types/typeAliases';
5
4
  import type { really_any } from '../../../utils/organization/really_any';
6
5
  import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
7
6
  import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
@@ -1,4 +1,4 @@
1
- import type { string_user_id } from '../../../types/typeAliases';
1
+ import type { string_markdown_text, string_mime_type_with_wildcard, string_user_id } from '../../../types/typeAliases';
2
2
  import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
3
3
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
4
4
  /**
@@ -7,12 +7,18 @@ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
7
7
  * @private internal type for `$provideLlmToolsFromEnv` and `$provideLlmToolsForTestingAndScriptsAndPlayground`
8
8
  */
9
9
  export type CreateLlmToolsFromConfigurationOptions = {
10
+ /**
11
+ * Title of the LLM tools
12
+ *
13
+ * @default 'LLM Tools from Configuration'
14
+ */
15
+ readonly title?: string_mime_type_with_wildcard & string_markdown_text;
10
16
  /**
11
17
  * This will will be passed to the created `LlmExecutionTools`
12
18
  *
13
19
  * @default false
14
20
  */
15
- isVerbose?: boolean;
21
+ readonly isVerbose?: boolean;
16
22
  /**
17
23
  * Identifier of the end user
18
24
  *
@@ -1,16 +1,8 @@
1
1
  import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
- import type { ChatPromptResult } from '../../execution/PromptResult';
4
- import type { CompletionPromptResult } from '../../execution/PromptResult';
5
- import type { EmbeddingPromptResult } from '../../execution/PromptResult';
6
- import type { PromptResult } from '../../execution/PromptResult';
7
- import type { ChatPrompt } from '../../types/Prompt';
8
- import type { CompletionPrompt } from '../../types/Prompt';
9
- import type { EmbeddingPrompt } from '../../types/Prompt';
10
- import type { Prompt } from '../../types/Prompt';
11
- import type { string_markdown } from '../../types/typeAliases';
12
- import type { string_markdown_text } from '../../types/typeAliases';
13
- import type { string_title } from '../../types/typeAliases';
3
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
4
+ import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
5
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
14
6
  /**
15
7
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
16
8
  *
@@ -18,6 +10,7 @@ import type { string_title } from '../../types/typeAliases';
18
10
  * @public exported from `@promptbook/core`
19
11
  */
20
12
  export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
13
+ readonly title: string_title & string_markdown_text;
21
14
  /**
22
15
  * Array of execution tools in order of priority
23
16
  */
@@ -25,8 +18,7 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
25
18
  /**
26
19
  * Gets array of execution tools in order of priority
27
20
  */
28
- constructor(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
29
- get title(): string_title & string_markdown_text;
21
+ constructor(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
30
22
  get description(): string_markdown;
31
23
  get profile(): {
32
24
  name: string;
@@ -0,0 +1,11 @@
1
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
+ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
3
+ /**
4
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
5
+ *
6
+ * @public exported from `@promptbook/core`
7
+ */
8
+ export declare function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools: undefined | LlmExecutionTools | ReadonlyArray<LlmExecutionTools>): LlmExecutionTools | MultipleLlmExecutionTools;
9
+ /**
10
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
11
+ */
@@ -1,4 +1,5 @@
1
1
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
+ import { string_markdown_text, string_title } from '../../types/typeAliases';
2
3
  import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
3
4
  /**
4
5
  * Joins multiple LLM Execution Tools into one
@@ -15,7 +16,7 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
15
16
  *
16
17
  * @public exported from `@promptbook/core`
17
18
  */
18
- export declare function joinLlmExecutionTools(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
19
+ export declare function joinLlmExecutionTools(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
19
20
  /**
20
21
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
21
22
  */
@@ -11,5 +11,6 @@ import type { string_postprocessing_function_name } from '../../types/typeAliase
11
11
  */
12
12
  export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>): Promise<string>;
13
13
  /**
14
+ * TODO: Do not use LoremIpsum, but use some faked text that looks more human-promptbook-like
14
15
  * TODO: [💝] Unite object for expecting amount and format - use here also a format
15
16
  */
@@ -1,12 +1,9 @@
1
1
  import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult } from '../../execution/PromptResult';
5
- import type { CompletionPromptResult } from '../../execution/PromptResult';
4
+ import type { ChatPromptResult, CompletionPromptResult } from '../../execution/PromptResult';
6
5
  import type { Prompt } from '../../types/Prompt';
7
- import type { string_markdown } from '../../types/typeAliases';
8
- import type { string_markdown_text } from '../../types/typeAliases';
9
- import type { string_title } from '../../types/typeAliases';
6
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
10
7
  /**
11
8
  * Mocked execution Tools for just echoing the requests for testing purposes.
12
9
  *
@@ -1,13 +1,9 @@
1
1
  import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult } from '../../execution/PromptResult';
5
- import type { CompletionPromptResult } from '../../execution/PromptResult';
6
- import type { EmbeddingPromptResult } from '../../execution/PromptResult';
4
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
7
5
  import type { Prompt } from '../../types/Prompt';
8
- import type { string_markdown } from '../../types/typeAliases';
9
- import type { string_markdown_text } from '../../types/typeAliases';
10
- import type { string_title } from '../../types/typeAliases';
6
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
11
7
  /**
12
8
  * Mocked execution Tools for just faking expected responses for testing purposes
13
9
  *
@@ -1,6 +1,5 @@
1
1
  import type { KnowledgePiecePreparedJson } from '../../pipeline/PipelineJson/KnowledgePieceJson';
2
- import type { Scraper } from '../_common/Scraper';
3
- import type { ScraperSourceHandler } from '../_common/Scraper';
2
+ import type { Scraper, ScraperSourceHandler } from '../_common/Scraper';
4
3
  import type { ExecutionTools } from '../../execution/ExecutionTools';
5
4
  import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
6
5
  import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.101.0-17`).
18
+ * It follows semantic versioning (e.g., `0.101.0-18`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/wizard",
3
- "version": "0.101.0-18",
3
+ "version": "0.101.0-19",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -95,7 +95,7 @@
95
95
  "module": "./esm/index.es.js",
96
96
  "typings": "./esm/typings/src/_packages/wizard.index.d.ts",
97
97
  "peerDependencies": {
98
- "@promptbook/core": "0.101.0-18"
98
+ "@promptbook/core": "0.101.0-19"
99
99
  },
100
100
  "dependencies": {
101
101
  "@ai-sdk/deepseek": "0.1.6",
package/umd/index.umd.js CHANGED
@@ -48,7 +48,7 @@
48
48
  * @generated
49
49
  * @see https://github.com/webgptorg/promptbook
50
50
  */
51
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-18';
51
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
52
52
  /**
53
53
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
54
54
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -7856,6 +7856,25 @@
7856
7856
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
7857
7857
  */
7858
7858
 
7859
+ /**
7860
+ * Takes an item or an array of items and returns an array of items
7861
+ *
7862
+ * 1) Any item except array and undefined returns array with that one item (also null)
7863
+ * 2) Undefined returns empty array
7864
+ * 3) Array returns itself
7865
+ *
7866
+ * @private internal utility
7867
+ */
7868
+ function arrayableToArray(input) {
7869
+ if (input === undefined) {
7870
+ return [];
7871
+ }
7872
+ if (input instanceof Array) {
7873
+ return input;
7874
+ }
7875
+ return [input];
7876
+ }
7877
+
7859
7878
  /**
7860
7879
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
7861
7880
  *
@@ -7866,12 +7885,10 @@
7866
7885
  /**
7867
7886
  * Gets array of execution tools in order of priority
7868
7887
  */
7869
- constructor(...llmExecutionTools) {
7888
+ constructor(title, ...llmExecutionTools) {
7889
+ this.title = title;
7870
7890
  this.llmExecutionTools = llmExecutionTools;
7871
7891
  }
7872
- get title() {
7873
- return 'Multiple LLM Providers';
7874
- }
7875
7892
  get description() {
7876
7893
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
7877
7894
  .map(({ title, description }, index) => {
@@ -7957,7 +7974,7 @@
7957
7974
  return await llmExecutionTools.callEmbeddingModel(prompt);
7958
7975
  // <- case [๐Ÿค–]:
7959
7976
  default:
7960
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
7977
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
7961
7978
  }
7962
7979
  }
7963
7980
  catch (error) {
@@ -7978,7 +7995,7 @@
7978
7995
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
7979
7996
  // 3) ...
7980
7997
  spaceTrim__default["default"]((block) => `
7981
- All execution tools failed:
7998
+ All execution tools of ${this.title} failed:
7982
7999
 
7983
8000
  ${block(errors
7984
8001
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -7987,11 +8004,11 @@
7987
8004
  `));
7988
8005
  }
7989
8006
  else if (this.llmExecutionTools.length === 0) {
7990
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
8007
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
7991
8008
  }
7992
8009
  else {
7993
8010
  throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
7994
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
8011
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
7995
8012
 
7996
8013
  Available \`LlmExecutionTools\`:
7997
8014
  ${block(this.description)}
@@ -8021,7 +8038,7 @@
8021
8038
  *
8022
8039
  * @public exported from `@promptbook/core`
8023
8040
  */
8024
- function joinLlmExecutionTools(...llmExecutionTools) {
8041
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
8025
8042
  if (llmExecutionTools.length === 0) {
8026
8043
  const warningMessage = spaceTrim__default["default"](`
8027
8044
  You have not provided any \`LlmExecutionTools\`
@@ -8053,30 +8070,27 @@
8053
8070
  };
8054
8071
  */
8055
8072
  }
8056
- return new MultipleLlmExecutionTools(...llmExecutionTools);
8073
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
8057
8074
  }
8058
8075
  /**
8059
8076
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
8060
8077
  */
8061
8078
 
8062
8079
  /**
8063
- * Takes an item or an array of items and returns an array of items
8064
- *
8065
- * 1) Any item except array and undefined returns array with that one item (also null)
8066
- * 2) Undefined returns empty array
8067
- * 3) Array returns itself
8080
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
8068
8081
  *
8069
- * @private internal utility
8082
+ * @public exported from `@promptbook/core`
8070
8083
  */
8071
- function arrayableToArray(input) {
8072
- if (input === undefined) {
8073
- return [];
8074
- }
8075
- if (input instanceof Array) {
8076
- return input;
8077
- }
8078
- return [input];
8084
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
8085
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
8086
+ const llmTools = _llms.length === 1
8087
+ ? _llms[0]
8088
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
8089
+ return llmTools;
8079
8090
  }
8091
+ /**
8092
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
8093
+ */
8080
8094
 
8081
8095
  /**
8082
8096
  * Prepares the persona for the pipeline
@@ -8095,8 +8109,7 @@
8095
8109
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
8096
8110
  tools,
8097
8111
  });
8098
- const _llms = arrayableToArray(tools.llm);
8099
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
8112
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
8100
8113
  const availableModels = (await llmTools.listModels())
8101
8114
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
8102
8115
  .map(({ modelName, modelDescription }) => ({
@@ -8713,9 +8726,7 @@
8713
8726
  if (tools === undefined || tools.llm === undefined) {
8714
8727
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
8715
8728
  }
8716
- // TODO: [🐚] Make arrayable LLMs -> single LLM DRY
8717
- const _llms = arrayableToArray(tools.llm);
8718
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
8729
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
8719
8730
  const llmToolsWithUsage = countUsage(llmTools);
8720
8731
  // <- TODO: [🌯]
8721
8732
  /*
@@ -9585,9 +9596,7 @@
9585
9596
  $scriptPipelineExecutionErrors: [],
9586
9597
  $failedResults: [], // Track all failed attempts
9587
9598
  };
9588
- // TODO: [🐚] Make arrayable LLMs -> single LLM DRY
9589
- const _llms = arrayableToArray(tools.llm);
9590
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
9599
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
9591
9600
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
9592
9601
  const isJokerAttempt = attemptIndex < 0;
9593
9602
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -10107,9 +10116,7 @@
10107
10116
  return ''; // <- Note: Np knowledge present, return empty string
10108
10117
  }
10109
10118
  try {
10110
- // TODO: [🐚] Make arrayable LLMs -> single LLM DRY
10111
- const _llms = arrayableToArray(tools.llm);
10112
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
10119
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
10113
10120
  const taskEmbeddingPrompt = {
10114
10121
  title: 'Knowledge Search',
10115
10122
  modelRequirements: {
@@ -10826,9 +10833,7 @@
10826
10833
  throw new MissingToolsError('LLM tools are required for scraping external files');
10827
10834
  // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
10828
10835
  }
10829
- // TODO: [🐚] Make arrayable LLMs -> single LLM DRY
10830
- const _llms = arrayableToArray(llm);
10831
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
10836
+ const llmTools = getSingleLlmExecutionTools(llm);
10832
10837
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
10833
10838
  const collection = createCollectionFromJson(...PipelineCollection);
10834
10839
  const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({
@@ -12967,7 +12972,7 @@
12967
12972
  * @public exported from `@promptbook/core`
12968
12973
  */
12969
12974
  function createLlmToolsFromConfiguration(configuration, options = {}) {
12970
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
12975
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
12971
12976
  const llmTools = configuration.map((llmConfiguration) => {
12972
12977
  const registeredItem = $llmToolsRegister
12973
12978
  .list()
@@ -12999,7 +13004,7 @@
12999
13004
  ...llmConfiguration.options,
13000
13005
  });
13001
13006
  });
13002
- return joinLlmExecutionTools(...llmTools);
13007
+ return joinLlmExecutionTools(title, ...llmTools);
13003
13008
  }
13004
13009
  /**
13005
13010
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -13116,7 +13121,9 @@
13116
13121
  });
13117
13122
  }
13118
13123
  else if (strategy === 'BRING_YOUR_OWN_KEYS') {
13119
- llmExecutionTools = await $provideLlmToolsFromEnv();
13124
+ llmExecutionTools = await $provideLlmToolsFromEnv({
13125
+ title: 'LLM Tools for wizard or CLI with BYOK strategy',
13126
+ });
13120
13127
  }
13121
13128
  else {
13122
13129
  throw new UnexpectedError(`\`$provideLlmToolsForWizardOrCli\` wrong strategy "${strategy}"`);
@@ -17149,7 +17156,7 @@
17149
17156
  throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
17150
17157
  }
17151
17158
  const fs = $provideFilesystemForNode();
17152
- const llm = await $provideLlmToolsFromEnv(options);
17159
+ const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
17153
17160
  const executables = await $provideExecutablesForNode();
17154
17161
  const tools = {
17155
17162
  llm,