@promptbook/legacy-documents 0.101.0-9 → 0.101.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/README.md +0 -4
  2. package/esm/index.es.js +57 -112
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +14 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +12 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +8 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
  8. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +0 -12
  12. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
  13. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +0 -12
  14. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +0 -12
  15. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
  16. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +0 -12
  17. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +0 -12
  18. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
  19. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
  20. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
  21. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -14
  22. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -14
  23. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +0 -12
  24. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
  25. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
  26. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +0 -12
  27. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +0 -12
  28. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
  29. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  30. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  31. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
  32. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
  33. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
  34. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
  35. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
  36. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
  37. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  38. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  39. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  40. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  41. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  42. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  43. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  44. package/esm/typings/src/execution/PromptResult.d.ts +2 -4
  45. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  46. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  47. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  48. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  49. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  51. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  52. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  53. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
  54. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  55. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  56. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
  57. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  58. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  59. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  60. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  61. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
  62. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
  63. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  64. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
  65. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
  66. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
  67. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
  68. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
  69. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
  70. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  71. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  72. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  73. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  74. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  75. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  76. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
  77. package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
  78. package/esm/typings/src/version.d.ts +1 -1
  79. package/package.json +2 -2
  80. package/umd/index.umd.js +57 -112
  81. package/umd/index.umd.js.map +1 -1
  82. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  83. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  84. package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
  85. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
  86. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
  87. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
  88. /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
25
25
  * @generated
26
26
  * @see https://github.com/webgptorg/promptbook
27
27
  */
28
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-9';
28
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0';
29
29
  /**
30
30
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
31
31
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1055,11 +1055,12 @@
1055
1055
  catch (error) {
1056
1056
  // Note: If we can't create cache directory, continue without it
1057
1057
  // This handles read-only filesystems, permission issues, and missing parent directories
1058
- if (error instanceof Error && (error.message.includes('EROFS') ||
1059
- error.message.includes('read-only') ||
1060
- error.message.includes('EACCES') ||
1061
- error.message.includes('EPERM') ||
1062
- error.message.includes('ENOENT'))) ;
1058
+ if (error instanceof Error &&
1059
+ (error.message.includes('EROFS') ||
1060
+ error.message.includes('read-only') ||
1061
+ error.message.includes('EACCES') ||
1062
+ error.message.includes('EPERM') ||
1063
+ error.message.includes('ENOENT'))) ;
1063
1064
  else {
1064
1065
  // Re-throw other unexpected errors
1065
1066
  throw error;
@@ -3120,75 +3121,32 @@
3120
3121
  */
3121
3122
 
3122
3123
  /**
3123
- * Predefined profiles for LLM providers to maintain consistency across the application
3124
- * These profiles represent each provider as a virtual persona in chat interfaces
3124
+ * Takes an item or an array of items and returns an array of items
3125
3125
  *
3126
- * @private !!!!
3126
+ * 1) Any item except array and undefined returns array with that one item (also null)
3127
+ * 2) Undefined returns empty array
3128
+ * 3) Array returns itself
3129
+ *
3130
+ * @private internal utility
3127
3131
  */
3128
- const LLM_PROVIDER_PROFILES = {
3129
- OPENAI: {
3130
- name: 'OPENAI',
3131
- fullname: 'OpenAI GPT',
3132
- color: '#10a37f', // OpenAI's signature green
3133
- // Note: avatarSrc could be added when we have provider logos available
3134
- },
3135
- ANTHROPIC: {
3136
- name: 'ANTHROPIC',
3137
- fullname: 'Anthropic Claude',
3138
- color: '#d97706', // Anthropic's orange/amber color
3139
- },
3140
- AZURE_OPENAI: {
3141
- name: 'AZURE_OPENAI',
3142
- fullname: 'Azure OpenAI',
3143
- color: '#0078d4', // Microsoft Azure blue
3144
- },
3145
- GOOGLE: {
3146
- name: 'GOOGLE',
3147
- fullname: 'Google Gemini',
3148
- color: '#4285f4', // Google blue
3149
- },
3150
- DEEPSEEK: {
3151
- name: 'DEEPSEEK',
3152
- fullname: 'DeepSeek',
3153
- color: '#7c3aed', // Purple color for DeepSeek
3154
- },
3155
- OLLAMA: {
3156
- name: 'OLLAMA',
3157
- fullname: 'Ollama',
3158
- color: '#059669', // Emerald green for local models
3159
- },
3160
- REMOTE: {
3161
- name: 'REMOTE',
3162
- fullname: 'Remote Server',
3163
- color: '#6b7280', // Gray for remote/proxy connections
3164
- },
3165
- MOCKED_ECHO: {
3166
- name: 'MOCKED_ECHO',
3167
- fullname: 'Echo (Test)',
3168
- color: '#8b5cf6', // Purple for test/mock tools
3169
- },
3170
- MOCKED_FAKE: {
3171
- name: 'MOCKED_FAKE',
3172
- fullname: 'Fake LLM (Test)',
3173
- color: '#ec4899', // Pink for fake/test tools
3174
- },
3175
- VERCEL: {
3176
- name: 'VERCEL',
3177
- fullname: 'Vercel AI',
3178
- color: '#000000', // Vercel's black
3179
- },
3180
- MULTIPLE: {
3181
- name: 'MULTIPLE',
3182
- fullname: 'Multiple Providers',
3183
- color: '#6366f1', // Indigo for combined/multiple providers
3184
- },
3185
- };
3132
+ function arrayableToArray(input) {
3133
+ if (input === undefined) {
3134
+ return [];
3135
+ }
3136
+ if (input instanceof Array) {
3137
+ return input;
3138
+ }
3139
+ return [input];
3140
+ }
3141
+
3186
3142
  /**
3187
- * TODO: Refactor this - each profile must be alongside the provider definition
3188
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
3189
- * Note: [💞] Ignore a discrepancy between file name and entity name
3143
+ * Profile for Multiple providers aggregation
3190
3144
  */
3191
-
3145
+ const MULTIPLE_PROVIDER_PROFILE = {
3146
+ name: 'MULTIPLE',
3147
+ fullname: 'Multiple Providers',
3148
+ color: '#6366f1',
3149
+ };
3192
3150
  /**
3193
3151
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
3194
3152
  *
@@ -3199,12 +3157,10 @@
3199
3157
  /**
3200
3158
  * Gets array of execution tools in order of priority
3201
3159
  */
3202
- constructor(...llmExecutionTools) {
3160
+ constructor(title, ...llmExecutionTools) {
3161
+ this.title = title;
3203
3162
  this.llmExecutionTools = llmExecutionTools;
3204
3163
  }
3205
- get title() {
3206
- return 'Multiple LLM Providers';
3207
- }
3208
3164
  get description() {
3209
3165
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
3210
3166
  .map(({ title, description }, index) => {
@@ -3226,7 +3182,7 @@
3226
3182
  `);
3227
3183
  }
3228
3184
  get profile() {
3229
- return LLM_PROVIDER_PROFILES.MULTIPLE;
3185
+ return MULTIPLE_PROVIDER_PROFILE;
3230
3186
  }
3231
3187
  /**
3232
3188
  * Check the configuration of all execution tools
@@ -3290,7 +3246,7 @@
3290
3246
  return await llmExecutionTools.callEmbeddingModel(prompt);
3291
3247
  // <- case [🤖]:
3292
3248
  default:
3293
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
3249
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
3294
3250
  }
3295
3251
  }
3296
3252
  catch (error) {
@@ -3311,7 +3267,7 @@
3311
3267
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
3312
3268
  // 3) ...
3313
3269
  spaceTrim__default["default"]((block) => `
3314
- All execution tools failed:
3270
+ All execution tools of ${this.title} failed:
3315
3271
 
3316
3272
  ${block(errors
3317
3273
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3320,11 +3276,11 @@
3320
3276
  `));
3321
3277
  }
3322
3278
  else if (this.llmExecutionTools.length === 0) {
3323
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
3279
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
3324
3280
  }
3325
3281
  else {
3326
3282
  throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
3327
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
3283
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
3328
3284
 
3329
3285
  Available \`LlmExecutionTools\`:
3330
3286
  ${block(this.description)}
@@ -3354,7 +3310,7 @@
3354
3310
  *
3355
3311
  * @public exported from `@promptbook/core`
3356
3312
  */
3357
- function joinLlmExecutionTools(...llmExecutionTools) {
3313
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
3358
3314
  if (llmExecutionTools.length === 0) {
3359
3315
  const warningMessage = spaceTrim__default["default"](`
3360
3316
  You have not provided any \`LlmExecutionTools\`
@@ -3386,30 +3342,27 @@
3386
3342
  };
3387
3343
  */
3388
3344
  }
3389
- return new MultipleLlmExecutionTools(...llmExecutionTools);
3345
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
3390
3346
  }
3391
3347
  /**
3392
3348
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3393
3349
  */
3394
3350
 
3395
3351
  /**
3396
- * Takes an item or an array of items and returns an array of items
3397
- *
3398
- * 1) Any item except array and undefined returns array with that one item (also null)
3399
- * 2) Undefined returns empty array
3400
- * 3) Array returns itself
3352
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
3401
3353
  *
3402
- * @private internal utility
3354
+ * @public exported from `@promptbook/core`
3403
3355
  */
3404
- function arrayableToArray(input) {
3405
- if (input === undefined) {
3406
- return [];
3407
- }
3408
- if (input instanceof Array) {
3409
- return input;
3410
- }
3411
- return [input];
3356
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
3357
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
3358
+ const llmTools = _llms.length === 1
3359
+ ? _llms[0]
3360
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
3361
+ return llmTools;
3412
3362
  }
3363
+ /**
3364
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3365
+ */
3413
3366
 
3414
3367
  /**
3415
3368
  * Prepares the persona for the pipeline
@@ -3428,8 +3381,7 @@
3428
3381
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
3429
3382
  tools,
3430
3383
  });
3431
- const _llms = arrayableToArray(tools.llm);
3432
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
3384
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
3433
3385
  const availableModels = (await llmTools.listModels())
3434
3386
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
3435
3387
  .map(({ modelName, modelDescription }) => ({
@@ -3473,6 +3425,7 @@
3473
3425
  };
3474
3426
  }
3475
3427
  /**
3428
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
3476
3429
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
3477
3430
  * TODO: [🏢] Check validity of `modelName` in pipeline
3478
3431
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -4190,9 +4143,7 @@
4190
4143
  if (tools === undefined || tools.llm === undefined) {
4191
4144
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
4192
4145
  }
4193
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4194
- const _llms = arrayableToArray(tools.llm);
4195
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4146
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4196
4147
  const llmToolsWithUsage = countUsage(llmTools);
4197
4148
  // <- TODO: [🌯]
4198
4149
  /*
@@ -5335,9 +5286,7 @@
5335
5286
  $scriptPipelineExecutionErrors: [],
5336
5287
  $failedResults: [], // Track all failed attempts
5337
5288
  };
5338
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5339
- const _llms = arrayableToArray(tools.llm);
5340
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5289
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5341
5290
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
5342
5291
  const isJokerAttempt = attemptIndex < 0;
5343
5292
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5857,9 +5806,7 @@
5857
5806
  return ''; // <- Note: Np knowledge present, return empty string
5858
5807
  }
5859
5808
  try {
5860
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5861
- const _llms = arrayableToArray(tools.llm);
5862
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5809
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5863
5810
  const taskEmbeddingPrompt = {
5864
5811
  title: 'Knowledge Search',
5865
5812
  modelRequirements: {
@@ -6460,13 +6407,13 @@
6460
6407
  // Calculate and update tldr based on pipeline progress
6461
6408
  const cv = newOngoingResult;
6462
6409
  // Calculate progress based on parameters resolved vs total parameters
6463
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
6410
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
6464
6411
  let resolvedParameters = 0;
6465
6412
  let currentTaskTitle = '';
6466
6413
  // Get the resolved parameters from output parameters
6467
6414
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
6468
6415
  // Count how many output parameters have non-empty values
6469
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
6416
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
6470
6417
  }
6471
6418
  // Try to determine current task from execution report
6472
6419
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6576,9 +6523,7 @@
6576
6523
  throw new MissingToolsError('LLM tools are required for scraping external files');
6577
6524
  // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
6578
6525
  }
6579
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6580
- const _llms = arrayableToArray(llm);
6581
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6526
+ const llmTools = getSingleLlmExecutionTools(llm);
6582
6527
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
6583
6528
  const collection = createCollectionFromJson(...PipelineCollection);
6584
6529
  const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({