@promptbook/pdf 0.101.0-18 → 0.101.0-19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. package/esm/index.es.js +44 -39
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  4. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  5. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
  6. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  7. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  8. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  9. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  10. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  11. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  12. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  13. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  15. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  16. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  17. package/esm/typings/src/version.d.ts +1 -1
  18. package/package.json +2 -2
  19. package/umd/index.umd.js +44 -39
  20. package/umd/index.umd.js.map +1 -1
  21. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  22. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  23. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  24. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  25. /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/typings/src/_packages/core.index.d.ts CHANGED
@@ -120,6 +120,7 @@ import { createLlmToolsFromConfiguration } from '../llm-providers/_common/regist
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
  import { countUsage } from '../llm-providers/_common/utils/count-total-usage/countUsage';
  import { limitTotalUsage } from '../llm-providers/_common/utils/count-total-usage/limitTotalUsage';
+ import { getSingleLlmExecutionTools } from '../llm-providers/_multiple/getSingleLlmExecutionTools';
  import { joinLlmExecutionTools } from '../llm-providers/_multiple/joinLlmExecutionTools';
  import { MultipleLlmExecutionTools } from '../llm-providers/_multiple/MultipleLlmExecutionTools';
  import { AgentLlmExecutionTools } from '../llm-providers/agent/AgentLlmExecutionTools';
@@ -291,6 +292,7 @@ export { createLlmToolsFromConfiguration };
  export { cacheLlmTools };
  export { countUsage };
  export { limitTotalUsage };
+ export { getSingleLlmExecutionTools };
  export { joinLlmExecutionTools };
  export { MultipleLlmExecutionTools };
  export { AgentLlmExecutionTools };
package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts CHANGED
@@ -8,3 +8,6 @@ import { string_agent_name, string_url_image } from '../../types/typeAliases';
   * @public exported from `@promptbook/core`
   */
  export declare function generatePlaceholderAgentProfileImageUrl(agentName?: string_agent_name): string_url_image;
+ /**
+  * TODO: [🤹] Figure out best placeholder image generator https://i.pravatar.cc/1000?u=568
+  */
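Note: the declaration above can be called roughly as sketched below (a minimal sketch; the exact format of the returned URL is not specified in this diff):

    import { generatePlaceholderAgentProfileImageUrl } from '@promptbook/core';

    // Builds a placeholder avatar image URL for an agent; the argument is optional per the signature above.
    const imageUrl = generatePlaceholderAgentProfileImageUrl('Agent Kai');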
package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts CHANGED
@@ -31,7 +31,13 @@ export type MockedChatDelayConfig = {
   *
   * @public exported from `@promptbook/components`
   */
- export type MockedChatProps = ChatProps & {
+ export type MockedChatProps = Omit<ChatProps, 'onReset' | /*'onMessage' | */ 'onUseTemplate' | 'isVoiceRecognitionButtonShown'> & {
+     /**
+      * Whether to show the reset button
+      *
+      * @default false
+      */
+     isResetShown?: boolean;
      /**
       * Optional delays configuration for emulating typing behavior
       */
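Note: `MockedChat` consumers no longer pass `onReset`, `onUseTemplate` or `isVoiceRecognitionButtonShown`; the reset button is now opted into via the new flag. A minimal sketch (the remaining `ChatProps` members are not shown in this diff, hence `Partial<>`):

    import type { MockedChatProps } from '@promptbook/components';

    const mockedChatProps: Partial<MockedChatProps> = {
        isResetShown: true, // new optional prop; defaults to false when omitted
    };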
package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts CHANGED
@@ -1,8 +1,7 @@
  import type { PartialDeep, Promisable, ReadonlyDeep, WritableDeep } from 'type-fest';
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
- import type { Parameters } from '../../types/typeAliases';
- import type { string_parameter_name } from '../../types/typeAliases';
+ import type { Parameters, string_parameter_name } from '../../types/typeAliases';
  import type { TODO_string } from '../../utils/organization/TODO_string';
  import type { ExecutionReportJson } from '../execution-report/ExecutionReportJson';
  import type { PipelineExecutorResult } from '../PipelineExecutorResult';
package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts CHANGED
@@ -1,9 +1,7 @@
  import type { ReadonlyDeep } from 'type-fest';
  import type { PipelineJson } from '../../pipeline/PipelineJson/PipelineJson';
  import type { TaskJson } from '../../pipeline/PipelineJson/TaskJson';
- import type { Parameters } from '../../types/typeAliases';
- import type { string_markdown } from '../../types/typeAliases';
- import type { string_parameter_value } from '../../types/typeAliases';
+ import type { Parameters, string_markdown, string_parameter_value } from '../../types/typeAliases';
  import type { ExecutionTools } from '../ExecutionTools';
  /**
   * Options for retrieving relevant knowledge for a specific task during pipeline execution.
package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts CHANGED
@@ -1,7 +1,6 @@
  import { Promisable } from 'type-fest';
  import type { Identification } from '../../../remote-server/socket-types/_subtypes/Identification';
- import type { string_app_id } from '../../../types/typeAliases';
- import type { string_url } from '../../../types/typeAliases';
+ import type { string_app_id, string_url } from '../../../types/typeAliases';
  import type { really_any } from '../../../utils/organization/really_any';
  import type { CacheLlmToolsOptions } from '../utils/cache/CacheLlmToolsOptions';
  import type { LlmExecutionToolsWithTotalUsage } from '../utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts CHANGED
@@ -1,4 +1,4 @@
- import type { string_user_id } from '../../../types/typeAliases';
+ import type { string_markdown_text, string_mime_type_with_wildcard, string_user_id } from '../../../types/typeAliases';
  import { MultipleLlmExecutionTools } from '../../_multiple/MultipleLlmExecutionTools';
  import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
  /**
@@ -7,12 +7,18 @@ import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
   * @private internal type for `$provideLlmToolsFromEnv` and `$provideLlmToolsForTestingAndScriptsAndPlayground`
   */
  export type CreateLlmToolsFromConfigurationOptions = {
+     /**
+      * Title of the LLM tools
+      *
+      * @default 'LLM Tools from Configuration'
+      */
+     readonly title?: string_mime_type_with_wildcard & string_markdown_text;
      /**
       * This will will be passed to the created `LlmExecutionTools`
       *
       * @default false
       */
-     isVerbose?: boolean;
+     readonly isVerbose?: boolean;
      /**
       * Identifier of the end user
       *
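Note: `CreateLlmToolsFromConfigurationOptions` is marked `@private`, so the sketch below only illustrates the shape of the new option rather than a public API; the values are illustrative:

    // Shape sketch only; which of the remaining members are required is not shown in this diff.
    const options = {
        title: 'Playground LLM Tools', // new optional field; defaults to 'LLM Tools from Configuration'
        isVerbose: true,               // unchanged behaviour, now declared readonly in the type
    };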
package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts CHANGED
@@ -1,16 +1,8 @@
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
- import type { ChatPromptResult } from '../../execution/PromptResult';
- import type { CompletionPromptResult } from '../../execution/PromptResult';
- import type { EmbeddingPromptResult } from '../../execution/PromptResult';
- import type { PromptResult } from '../../execution/PromptResult';
- import type { ChatPrompt } from '../../types/Prompt';
- import type { CompletionPrompt } from '../../types/Prompt';
- import type { EmbeddingPrompt } from '../../types/Prompt';
- import type { Prompt } from '../../types/Prompt';
- import type { string_markdown } from '../../types/typeAliases';
- import type { string_markdown_text } from '../../types/typeAliases';
- import type { string_title } from '../../types/typeAliases';
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
+ import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
  /**
   * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
   *
@@ -18,6 +10,7 @@ import type { string_title } from '../../types/typeAliases';
   * @public exported from `@promptbook/core`
   */
  export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
+     readonly title: string_title & string_markdown_text;
      /**
       * Array of execution tools in order of priority
       */
@@ -25,8 +18,7 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
      /**
       * Gets array of execution tools in order of priority
       */
-     constructor(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
-     get title(): string_title & string_markdown_text;
+     constructor(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>);
      get description(): string_markdown;
      get profile(): {
          name: string;
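Note: the title moves from a hard-coded getter (previously always 'Multiple LLM Providers', see the UMD diff below) to a required first constructor argument. A minimal sketch with placeholder provider tools (the `LlmExecutionTools` type import path is assumed):

    import { MultipleLlmExecutionTools } from '@promptbook/core';
    import type { LlmExecutionTools } from '@promptbook/core'; // assumed export path for the type

    declare const openAiTools: LlmExecutionTools;    // placeholder implementation
    declare const anthropicTools: LlmExecutionTools; // placeholder implementation

    const multipleTools = new MultipleLlmExecutionTools(
        'My LLM Providers', // <- the title is now passed in instead of being hard-coded
        openAiTools,
        anthropicTools,
    );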
package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts ADDED
@@ -0,0 +1,11 @@
+ import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
+ /**
+  * Just returns the given `LlmExecutionTools` or joins multiple into one
+  *
+  * @public exported from `@promptbook/core`
+  */
+ export declare function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools: undefined | LlmExecutionTools | ReadonlyArray<LlmExecutionTools>): LlmExecutionTools | MultipleLlmExecutionTools;
+ /**
+  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+  */
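Note: this new helper is the centrepiece of the release; the repeated "arrayable LLMs -> single LLM" blocks in the executor, scraper and persona-preparation code are replaced by calls to it (see the UMD diff below). A usage sketch with placeholder provider tools (the `LlmExecutionTools` type import path is assumed):

    import { getSingleLlmExecutionTools } from '@promptbook/core';
    import type { LlmExecutionTools } from '@promptbook/core'; // assumed export path for the type

    declare const openAiTools: LlmExecutionTools;    // placeholder
    declare const anthropicTools: LlmExecutionTools; // placeholder

    const single = getSingleLlmExecutionTools(openAiTools);                   // a single tool is returned as-is
    const joined = getSingleLlmExecutionTools([openAiTools, anthropicTools]); // several are joined into a MultipleLlmExecutionTools
    const empty = getSingleLlmExecutionTools(undefined);                      // empty wrapper; per the UMD code below, executing a prompt on it throws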
package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts CHANGED
@@ -1,4 +1,5 @@
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
+ import { string_markdown_text, string_title } from '../../types/typeAliases';
  import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
  /**
   * Joins multiple LLM Execution Tools into one
@@ -15,7 +16,7 @@ import { MultipleLlmExecutionTools } from './MultipleLlmExecutionTools';
   *
   * @public exported from `@promptbook/core`
   */
- export declare function joinLlmExecutionTools(...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
+ export declare function joinLlmExecutionTools(title: string_title & string_markdown_text, ...llmExecutionTools: ReadonlyArray<LlmExecutionTools>): MultipleLlmExecutionTools;
  /**
   * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
   */
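Note: existing call sites gain a leading title argument. A minimal sketch (placeholders as above; the `LlmExecutionTools` type import path is assumed):

    import { joinLlmExecutionTools } from '@promptbook/core';
    import type { LlmExecutionTools } from '@promptbook/core'; // assumed export path for the type

    declare const openAiTools: LlmExecutionTools;    // placeholder
    declare const anthropicTools: LlmExecutionTools; // placeholder

    const llmTools = joinLlmExecutionTools(
        'OpenAI + Anthropic fallback', // <- new leading title argument
        openAiTools,
        anthropicTools,
    );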
package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts CHANGED
@@ -11,5 +11,6 @@ import type { string_postprocessing_function_name } from '../../types/typeAliase
   */
  export declare function $fakeTextToExpectations(expectations: Expectations, postprocessingFunctionNames?: ReadonlyArray<string_postprocessing_function_name>): Promise<string>;
  /**
+  * TODO: Do not use LoremIpsum, but use some faked text that looks more human-promptbook-like
   * TODO: [๐Ÿ’] Unite object for expecting amount and format - use here also a format
   */
package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts CHANGED
@@ -1,12 +1,9 @@
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
- import type { ChatPromptResult } from '../../execution/PromptResult';
- import type { CompletionPromptResult } from '../../execution/PromptResult';
+ import type { ChatPromptResult, CompletionPromptResult } from '../../execution/PromptResult';
  import type { Prompt } from '../../types/Prompt';
- import type { string_markdown } from '../../types/typeAliases';
- import type { string_markdown_text } from '../../types/typeAliases';
- import type { string_title } from '../../types/typeAliases';
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
  /**
   * Mocked execution Tools for just echoing the requests for testing purposes.
   *
package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts CHANGED
@@ -1,13 +1,9 @@
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
- import type { ChatPromptResult } from '../../execution/PromptResult';
- import type { CompletionPromptResult } from '../../execution/PromptResult';
- import type { EmbeddingPromptResult } from '../../execution/PromptResult';
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
  import type { Prompt } from '../../types/Prompt';
- import type { string_markdown } from '../../types/typeAliases';
- import type { string_markdown_text } from '../../types/typeAliases';
- import type { string_title } from '../../types/typeAliases';
+ import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
  /**
   * Mocked execution Tools for just faking expected responses for testing purposes
   *
package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts CHANGED
@@ -1,6 +1,5 @@
  import type { KnowledgePiecePreparedJson } from '../../pipeline/PipelineJson/KnowledgePieceJson';
- import type { Scraper } from '../_common/Scraper';
- import type { ScraperSourceHandler } from '../_common/Scraper';
+ import type { Scraper, ScraperSourceHandler } from '../_common/Scraper';
  import type { ExecutionTools } from '../../execution/ExecutionTools';
  import type { PrepareAndScrapeOptions } from '../../prepare/PrepareAndScrapeOptions';
  import type { ScraperAndConverterMetadata } from '../_common/register/ScraperAndConverterMetadata';
package/esm/typings/src/version.d.ts CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
   * Represents the version string of the Promptbook engine.
-  * It follows semantic versioning (e.g., `0.101.0-17`).
+  * It follows semantic versioning (e.g., `0.101.0-18`).
   *
   * @generated
   */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@promptbook/pdf",
-     "version": "0.101.0-18",
+     "version": "0.101.0-19",
      "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
      "private": false,
      "sideEffects": false,
@@ -94,7 +94,7 @@
      "module": "./esm/index.es.js",
      "typings": "./esm/typings/src/_packages/pdf.index.d.ts",
      "peerDependencies": {
-         "@promptbook/core": "0.101.0-18"
+         "@promptbook/core": "0.101.0-19"
      },
      "dependencies": {
          "crypto": "1.0.1",
package/umd/index.umd.js CHANGED
@@ -24,7 +24,7 @@
   * @generated
   * @see https://github.com/webgptorg/promptbook
   */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-18';
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
  /**
   * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
   * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2960,6 +2960,25 @@
   * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
   */

+ /**
+  * Takes an item or an array of items and returns an array of items
+  *
+  * 1) Any item except array and undefined returns array with that one item (also null)
+  * 2) Undefined returns empty array
+  * 3) Array returns itself
+  *
+  * @private internal utility
+  */
+ function arrayableToArray(input) {
+     if (input === undefined) {
+         return [];
+     }
+     if (input instanceof Array) {
+         return input;
+     }
+     return [input];
+ }
+
  /**
   * Predefined profiles for LLM providers to maintain consistency across the application
   * These profiles represent each provider as a virtual persona in chat interfaces
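Note: the hunk above only moves the private `arrayableToArray` helper up in the bundle (its old definition is removed in a later hunk). Per its docstring, it behaves like this (behaviour sketch, not public API; `openAiTools` and `anthropicTools` are placeholders):

    arrayableToArray(undefined);                      // -> []
    arrayableToArray(openAiTools);                    // -> [openAiTools]  (any single non-array item, including null)
    arrayableToArray([openAiTools, anthropicTools]);  // -> the same array, returned as-is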
@@ -3040,12 +3059,10 @@
      /**
       * Gets array of execution tools in order of priority
       */
-     constructor(...llmExecutionTools) {
+     constructor(title, ...llmExecutionTools) {
+         this.title = title;
          this.llmExecutionTools = llmExecutionTools;
      }
-     get title() {
-         return 'Multiple LLM Providers';
-     }
      get description() {
          const innerModelsTitlesAndDescriptions = this.llmExecutionTools
              .map(({ title, description }, index) => {
@@ -3131,7 +3148,7 @@
                      return await llmExecutionTools.callEmbeddingModel(prompt);
                  // <- case [🤖]:
                  default:
-                     throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+                     throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
              }
          }
          catch (error) {
@@ -3152,7 +3169,7 @@
          // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
          // 3) ...
          spaceTrim__default["default"]((block) => `
-             All execution tools failed:
+             All execution tools of ${this.title} failed:

              ${block(errors
                  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3161,11 +3178,11 @@
              `));
          }
          else if (this.llmExecutionTools.length === 0) {
-             throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
+             throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
          }
          else {
              throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
-                 You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+                 You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}

                  Available \`LlmExecutionTools\`:
                  ${block(this.description)}
@@ -3195,7 +3212,7 @@
   *
   * @public exported from `@promptbook/core`
   */
- function joinLlmExecutionTools(...llmExecutionTools) {
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
      if (llmExecutionTools.length === 0) {
          const warningMessage = spaceTrim__default["default"](`
              You have not provided any \`LlmExecutionTools\`
@@ -3227,30 +3244,27 @@
          };
          */
      }
-     return new MultipleLlmExecutionTools(...llmExecutionTools);
+     return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
  }
  /**
   * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
   */

  /**
-  * Takes an item or an array of items and returns an array of items
-  *
-  * 1) Any item except array and undefined returns array with that one item (also null)
-  * 2) Undefined returns empty array
-  * 3) Array returns itself
+  * Just returns the given `LlmExecutionTools` or joins multiple into one
   *
-  * @private internal utility
+  * @public exported from `@promptbook/core`
   */
- function arrayableToArray(input) {
-     if (input === undefined) {
-         return [];
-     }
-     if (input instanceof Array) {
-         return input;
-     }
-     return [input];
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+     const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+     const llmTools = _llms.length === 1
+         ? _llms[0]
+         : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+     return llmTools;
  }
+ /**
+  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+  */

  /**
   * Prepares the persona for the pipeline
@@ -3269,8 +3283,7 @@
          pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
          tools,
      });
-     const _llms = arrayableToArray(tools.llm);
-     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+     const llmTools = getSingleLlmExecutionTools(tools.llm);
      const availableModels = (await llmTools.listModels())
          .filter(({ modelVariant }) => modelVariant === 'CHAT')
          .map(({ modelName, modelDescription }) => ({
@@ -4042,9 +4055,7 @@
      if (tools === undefined || tools.llm === undefined) {
          throw new MissingToolsError('LLM tools are required for preparing the pipeline');
      }
-     // TODO: [๐Ÿš] Make arrayable LLMs -> single LLM DRY
-     const _llms = arrayableToArray(tools.llm);
-     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+     const llmTools = getSingleLlmExecutionTools(tools.llm);
      const llmToolsWithUsage = countUsage(llmTools);
      // <- TODO: [🌯]
      /*
@@ -5187,9 +5198,7 @@
          $scriptPipelineExecutionErrors: [],
          $failedResults: [], // Track all failed attempts
      };
-     // TODO: [๐Ÿš] Make arrayable LLMs -> single LLM DRY
-     const _llms = arrayableToArray(tools.llm);
-     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+     const llmTools = getSingleLlmExecutionTools(tools.llm);
      attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
          const isJokerAttempt = attemptIndex < 0;
          const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5709,9 +5718,7 @@
          return ''; // <- Note: Np knowledge present, return empty string
      }
      try {
-         // TODO: [๐Ÿš] Make arrayable LLMs -> single LLM DRY
-         const _llms = arrayableToArray(tools.llm);
-         const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+         const llmTools = getSingleLlmExecutionTools(tools.llm);
          const taskEmbeddingPrompt = {
              title: 'Knowledge Search',
              modelRequirements: {
@@ -6428,9 +6435,7 @@
          throw new MissingToolsError('LLM tools are required for scraping external files');
          // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
      }
-     // TODO: [๐Ÿš] Make arrayable LLMs -> single LLM DRY
-     const _llms = arrayableToArray(llm);
-     const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+     const llmTools = getSingleLlmExecutionTools(llm);
      // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
      const collection = createCollectionFromJson(...PipelineCollection);
      const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({