@promptbook/documents 0.101.0-8 → 0.101.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/README.md +0 -4
  2. package/esm/index.es.js +57 -112
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +14 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +12 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +8 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
  8. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +2 -8
  12. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
  13. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +2 -8
  14. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +2 -8
  15. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
  16. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +2 -8
  17. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +2 -8
  18. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
  19. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
  20. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
  21. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +25 -10
  22. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -8
  23. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +2 -8
  24. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
  25. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
  26. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +2 -8
  27. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +2 -8
  28. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
  29. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  30. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  31. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
  32. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
  33. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
  34. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
  35. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
  36. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
  37. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  38. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  39. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  40. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  41. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  42. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  43. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  44. package/esm/typings/src/execution/PromptResult.d.ts +2 -4
  45. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  46. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  47. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  48. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  49. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  51. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  52. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  53. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
  54. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  55. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  56. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
  57. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  58. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  59. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  60. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  61. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
  62. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
  63. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  64. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
  65. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
  66. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
  67. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
  68. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
  69. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
  70. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  71. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  72. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  73. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  74. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  75. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  76. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
  77. package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
  78. package/esm/typings/src/version.d.ts +1 -1
  79. package/package.json +2 -2
  80. package/umd/index.umd.js +57 -112
  81. package/umd/index.umd.js.map +1 -1
  82. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  83. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  84. package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
  85. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
  86. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
  87. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
  88. /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/README.md CHANGED
@@ -29,10 +29,6 @@ Write AI applications using plain human language across multiple models and plat
29
29
 
30
30
 
31
31
 
32
- <blockquote style="color: #ff8811">
33
- <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
34
- </blockquote>
35
-
36
32
  ## 📦 Package `@promptbook/documents`
37
33
 
38
34
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -26,7 +26,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
26
26
  * @generated
27
27
  * @see https://github.com/webgptorg/promptbook
28
28
  */
29
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-8';
29
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0';
30
30
  /**
31
31
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
32
32
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1056,11 +1056,12 @@ async function getScraperIntermediateSource(source, options) {
1056
1056
  catch (error) {
1057
1057
  // Note: If we can't create cache directory, continue without it
1058
1058
  // This handles read-only filesystems, permission issues, and missing parent directories
1059
- if (error instanceof Error && (error.message.includes('EROFS') ||
1060
- error.message.includes('read-only') ||
1061
- error.message.includes('EACCES') ||
1062
- error.message.includes('EPERM') ||
1063
- error.message.includes('ENOENT'))) ;
1059
+ if (error instanceof Error &&
1060
+ (error.message.includes('EROFS') ||
1061
+ error.message.includes('read-only') ||
1062
+ error.message.includes('EACCES') ||
1063
+ error.message.includes('EPERM') ||
1064
+ error.message.includes('ENOENT'))) ;
1064
1065
  else {
1065
1066
  // Re-throw other unexpected errors
1066
1067
  throw error;
@@ -3121,75 +3122,32 @@ function countUsage(llmTools) {
3121
3122
  */
3122
3123
 
3123
3124
  /**
3124
- * Predefined profiles for LLM providers to maintain consistency across the application
3125
- * These profiles represent each provider as a virtual persona in chat interfaces
3125
+ * Takes an item or an array of items and returns an array of items
3126
3126
  *
3127
- * @private !!!!
3127
+ * 1) Any item except array and undefined returns array with that one item (also null)
3128
+ * 2) Undefined returns empty array
3129
+ * 3) Array returns itself
3130
+ *
3131
+ * @private internal utility
3128
3132
  */
3129
- const LLM_PROVIDER_PROFILES = {
3130
- OPENAI: {
3131
- name: 'OPENAI',
3132
- fullname: 'OpenAI GPT',
3133
- color: '#10a37f', // OpenAI's signature green
3134
- // Note: avatarSrc could be added when we have provider logos available
3135
- },
3136
- ANTHROPIC: {
3137
- name: 'ANTHROPIC',
3138
- fullname: 'Anthropic Claude',
3139
- color: '#d97706', // Anthropic's orange/amber color
3140
- },
3141
- AZURE_OPENAI: {
3142
- name: 'AZURE_OPENAI',
3143
- fullname: 'Azure OpenAI',
3144
- color: '#0078d4', // Microsoft Azure blue
3145
- },
3146
- GOOGLE: {
3147
- name: 'GOOGLE',
3148
- fullname: 'Google Gemini',
3149
- color: '#4285f4', // Google blue
3150
- },
3151
- DEEPSEEK: {
3152
- name: 'DEEPSEEK',
3153
- fullname: 'DeepSeek',
3154
- color: '#7c3aed', // Purple color for DeepSeek
3155
- },
3156
- OLLAMA: {
3157
- name: 'OLLAMA',
3158
- fullname: 'Ollama',
3159
- color: '#059669', // Emerald green for local models
3160
- },
3161
- REMOTE: {
3162
- name: 'REMOTE',
3163
- fullname: 'Remote Server',
3164
- color: '#6b7280', // Gray for remote/proxy connections
3165
- },
3166
- MOCKED_ECHO: {
3167
- name: 'MOCKED_ECHO',
3168
- fullname: 'Echo (Test)',
3169
- color: '#8b5cf6', // Purple for test/mock tools
3170
- },
3171
- MOCKED_FAKE: {
3172
- name: 'MOCKED_FAKE',
3173
- fullname: 'Fake LLM (Test)',
3174
- color: '#ec4899', // Pink for fake/test tools
3175
- },
3176
- VERCEL: {
3177
- name: 'VERCEL',
3178
- fullname: 'Vercel AI',
3179
- color: '#000000', // Vercel's black
3180
- },
3181
- MULTIPLE: {
3182
- name: 'MULTIPLE',
3183
- fullname: 'Multiple Providers',
3184
- color: '#6366f1', // Indigo for combined/multiple providers
3185
- },
3186
- };
3133
+ function arrayableToArray(input) {
3134
+ if (input === undefined) {
3135
+ return [];
3136
+ }
3137
+ if (input instanceof Array) {
3138
+ return input;
3139
+ }
3140
+ return [input];
3141
+ }
3142
+
3187
3143
  /**
3188
- * TODO: Refactor this - each profile must be alongside the provider definition
3189
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
3190
- * Note: [💞] Ignore a discrepancy between file name and entity name
3144
+ * Profile for Multiple providers aggregation
3191
3145
  */
3192
-
3146
+ const MULTIPLE_PROVIDER_PROFILE = {
3147
+ name: 'MULTIPLE',
3148
+ fullname: 'Multiple Providers',
3149
+ color: '#6366f1',
3150
+ };
3193
3151
  /**
3194
3152
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
3195
3153
  *
@@ -3200,12 +3158,10 @@ class MultipleLlmExecutionTools {
3200
3158
  /**
3201
3159
  * Gets array of execution tools in order of priority
3202
3160
  */
3203
- constructor(...llmExecutionTools) {
3161
+ constructor(title, ...llmExecutionTools) {
3162
+ this.title = title;
3204
3163
  this.llmExecutionTools = llmExecutionTools;
3205
3164
  }
3206
- get title() {
3207
- return 'Multiple LLM Providers';
3208
- }
3209
3165
  get description() {
3210
3166
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
3211
3167
  .map(({ title, description }, index) => {
@@ -3227,7 +3183,7 @@ class MultipleLlmExecutionTools {
3227
3183
  `);
3228
3184
  }
3229
3185
  get profile() {
3230
- return LLM_PROVIDER_PROFILES.MULTIPLE;
3186
+ return MULTIPLE_PROVIDER_PROFILE;
3231
3187
  }
3232
3188
  /**
3233
3189
  * Check the configuration of all execution tools
@@ -3291,7 +3247,7 @@ class MultipleLlmExecutionTools {
3291
3247
  return await llmExecutionTools.callEmbeddingModel(prompt);
3292
3248
  // <- case [🤖]:
3293
3249
  default:
3294
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
3250
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
3295
3251
  }
3296
3252
  }
3297
3253
  catch (error) {
@@ -3312,7 +3268,7 @@ class MultipleLlmExecutionTools {
3312
3268
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
3313
3269
  // 3) ...
3314
3270
  spaceTrim$1((block) => `
3315
- All execution tools failed:
3271
+ All execution tools of ${this.title} failed:
3316
3272
 
3317
3273
  ${block(errors
3318
3274
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3321,11 +3277,11 @@ class MultipleLlmExecutionTools {
3321
3277
  `));
3322
3278
  }
3323
3279
  else if (this.llmExecutionTools.length === 0) {
3324
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
3280
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
3325
3281
  }
3326
3282
  else {
3327
3283
  throw new PipelineExecutionError(spaceTrim$1((block) => `
3328
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
3284
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
3329
3285
 
3330
3286
  Available \`LlmExecutionTools\`:
3331
3287
  ${block(this.description)}
@@ -3355,7 +3311,7 @@ class MultipleLlmExecutionTools {
3355
3311
  *
3356
3312
  * @public exported from `@promptbook/core`
3357
3313
  */
3358
- function joinLlmExecutionTools(...llmExecutionTools) {
3314
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
3359
3315
  if (llmExecutionTools.length === 0) {
3360
3316
  const warningMessage = spaceTrim$1(`
3361
3317
  You have not provided any \`LlmExecutionTools\`
@@ -3387,30 +3343,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
3387
3343
  };
3388
3344
  */
3389
3345
  }
3390
- return new MultipleLlmExecutionTools(...llmExecutionTools);
3346
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
3391
3347
  }
3392
3348
  /**
3393
3349
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3394
3350
  */
3395
3351
 
3396
3352
  /**
3397
- * Takes an item or an array of items and returns an array of items
3398
- *
3399
- * 1) Any item except array and undefined returns array with that one item (also null)
3400
- * 2) Undefined returns empty array
3401
- * 3) Array returns itself
3353
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
3402
3354
  *
3403
- * @private internal utility
3355
+ * @public exported from `@promptbook/core`
3404
3356
  */
3405
- function arrayableToArray(input) {
3406
- if (input === undefined) {
3407
- return [];
3408
- }
3409
- if (input instanceof Array) {
3410
- return input;
3411
- }
3412
- return [input];
3357
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
3358
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
3359
+ const llmTools = _llms.length === 1
3360
+ ? _llms[0]
3361
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
3362
+ return llmTools;
3413
3363
  }
3364
+ /**
3365
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3366
+ */
3414
3367
 
3415
3368
  /**
3416
3369
  * Prepares the persona for the pipeline
@@ -3429,8 +3382,7 @@ async function preparePersona(personaDescription, tools, options) {
3429
3382
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
3430
3383
  tools,
3431
3384
  });
3432
- const _llms = arrayableToArray(tools.llm);
3433
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
3385
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
3434
3386
  const availableModels = (await llmTools.listModels())
3435
3387
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
3436
3388
  .map(({ modelName, modelDescription }) => ({
@@ -3474,6 +3426,7 @@ async function preparePersona(personaDescription, tools, options) {
3474
3426
  };
3475
3427
  }
3476
3428
  /**
3429
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
3477
3430
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
3478
3431
  * TODO: [🏢] Check validity of `modelName` in pipeline
3479
3432
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -4191,9 +4144,7 @@ async function preparePipeline(pipeline, tools, options) {
4191
4144
  if (tools === undefined || tools.llm === undefined) {
4192
4145
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
4193
4146
  }
4194
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4195
- const _llms = arrayableToArray(tools.llm);
4196
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4147
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4197
4148
  const llmToolsWithUsage = countUsage(llmTools);
4198
4149
  // <- TODO: [🌯]
4199
4150
  /*
@@ -5336,9 +5287,7 @@ async function executeAttempts(options) {
5336
5287
  $scriptPipelineExecutionErrors: [],
5337
5288
  $failedResults: [], // Track all failed attempts
5338
5289
  };
5339
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5340
- const _llms = arrayableToArray(tools.llm);
5341
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5290
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5342
5291
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
5343
5292
  const isJokerAttempt = attemptIndex < 0;
5344
5293
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5858,9 +5807,7 @@ async function getKnowledgeForTask(options) {
5858
5807
  return ''; // <- Note: Np knowledge present, return empty string
5859
5808
  }
5860
5809
  try {
5861
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5862
- const _llms = arrayableToArray(tools.llm);
5863
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5810
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5864
5811
  const taskEmbeddingPrompt = {
5865
5812
  title: 'Knowledge Search',
5866
5813
  modelRequirements: {
@@ -6461,13 +6408,13 @@ function createPipelineExecutor(options) {
6461
6408
  // Calculate and update tldr based on pipeline progress
6462
6409
  const cv = newOngoingResult;
6463
6410
  // Calculate progress based on parameters resolved vs total parameters
6464
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
6411
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
6465
6412
  let resolvedParameters = 0;
6466
6413
  let currentTaskTitle = '';
6467
6414
  // Get the resolved parameters from output parameters
6468
6415
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
6469
6416
  // Count how many output parameters have non-empty values
6470
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
6417
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
6471
6418
  }
6472
6419
  // Try to determine current task from execution report
6473
6420
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6577,9 +6524,7 @@ class MarkdownScraper {
6577
6524
  throw new MissingToolsError('LLM tools are required for scraping external files');
6578
6525
  // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
6579
6526
  }
6580
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6581
- const _llms = arrayableToArray(llm);
6582
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6527
+ const llmTools = getSingleLlmExecutionTools(llm);
6583
6528
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
6584
6529
  const collection = createCollectionFromJson(...PipelineCollection);
6585
6530
  const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({