@promptbook/node 0.101.0-9 → 0.102.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. package/esm/index.es.js +59 -112
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +30 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +12 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +12 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  10. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +0 -12
  11. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
  12. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +0 -12
  13. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +0 -12
  14. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
  15. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +0 -12
  16. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +0 -12
  17. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
  18. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
  19. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
  20. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -14
  21. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -14
  22. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +0 -12
  23. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
  24. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
  25. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +0 -12
  26. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +0 -12
  27. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
  28. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  29. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  30. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
  31. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
  32. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
  33. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
  34. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
  35. package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +5 -1
  36. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
  37. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  38. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  39. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  40. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  41. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  42. package/esm/typings/src/book-components/Chat/utils/savePlugins.d.ts +55 -0
  43. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  44. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  45. package/esm/typings/src/execution/PromptResult.d.ts +2 -4
  46. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  47. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  48. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  49. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  51. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  52. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  53. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  54. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
  55. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  56. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  57. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
  58. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  59. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  60. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  61. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  62. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
  63. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
  64. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  65. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
  66. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
  67. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
  68. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
  69. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
  70. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
  71. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  72. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  73. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  74. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  75. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  76. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  77. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
  78. package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
  79. package/esm/typings/src/version.d.ts +1 -1
  80. package/package.json +2 -2
  81. package/umd/index.umd.js +59 -112
  82. package/umd/index.umd.js.map +1 -1
  83. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  84. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  85. package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
  86. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
  87. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
  88. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
  89. /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
@@ -2,11 +2,11 @@ import { string_markdown } from '../../types/typeAliases';
2
2
  /**
3
3
  * Function `promptbookifyAiText` will slightly modify the text so we know it was processed by Promptbook
4
4
  *
5
+ * Note: [🔂] This function is idempotent.
6
+ *
5
7
  * @public exported from `@promptbook/markdown-utils`
6
8
  */
7
9
  export declare function promptbookifyAiText(text: string_markdown): string_markdown;
8
10
  /**
9
- * TODO: !!!!! Make the function idempotent and add "Note: [🔂] This function is idempotent."
10
- * TODO: [🅾️]!!! Use this across the project where AI text is involved
11
11
  * TODO: [🧠][✌️] Make some Promptbook-native token system
12
12
  */
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.101.0-8`).
18
+ * It follows semantic versioning (e.g., `0.101.0`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/node",
3
- "version": "0.101.0-9",
3
+ "version": "0.102.0-0",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -93,7 +93,7 @@
93
93
  "module": "./esm/index.es.js",
94
94
  "typings": "./esm/typings/src/_packages/node.index.d.ts",
95
95
  "peerDependencies": {
96
- "@promptbook/core": "0.101.0-9"
96
+ "@promptbook/core": "0.102.0-0"
97
97
  },
98
98
  "dependencies": {
99
99
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -45,7 +45,7 @@
45
45
  * @generated
46
46
  * @see https://github.com/webgptorg/promptbook
47
47
  */
48
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-9';
48
+ const PROMPTBOOK_ENGINE_VERSION = '0.102.0-0';
49
49
  /**
50
50
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
51
51
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3028,75 +3028,32 @@
3028
3028
  }
3029
3029
 
3030
3030
  /**
3031
- * Predefined profiles for LLM providers to maintain consistency across the application
3032
- * These profiles represent each provider as a virtual persona in chat interfaces
3031
+ * Takes an item or an array of items and returns an array of items
3033
3032
  *
3034
- * @private !!!!
3033
+ * 1) Any item except array and undefined returns array with that one item (also null)
3034
+ * 2) Undefined returns empty array
3035
+ * 3) Array returns itself
3036
+ *
3037
+ * @private internal utility
3035
3038
  */
3036
- const LLM_PROVIDER_PROFILES = {
3037
- OPENAI: {
3038
- name: 'OPENAI',
3039
- fullname: 'OpenAI GPT',
3040
- color: '#10a37f', // OpenAI's signature green
3041
- // Note: avatarSrc could be added when we have provider logos available
3042
- },
3043
- ANTHROPIC: {
3044
- name: 'ANTHROPIC',
3045
- fullname: 'Anthropic Claude',
3046
- color: '#d97706', // Anthropic's orange/amber color
3047
- },
3048
- AZURE_OPENAI: {
3049
- name: 'AZURE_OPENAI',
3050
- fullname: 'Azure OpenAI',
3051
- color: '#0078d4', // Microsoft Azure blue
3052
- },
3053
- GOOGLE: {
3054
- name: 'GOOGLE',
3055
- fullname: 'Google Gemini',
3056
- color: '#4285f4', // Google blue
3057
- },
3058
- DEEPSEEK: {
3059
- name: 'DEEPSEEK',
3060
- fullname: 'DeepSeek',
3061
- color: '#7c3aed', // Purple color for DeepSeek
3062
- },
3063
- OLLAMA: {
3064
- name: 'OLLAMA',
3065
- fullname: 'Ollama',
3066
- color: '#059669', // Emerald green for local models
3067
- },
3068
- REMOTE: {
3069
- name: 'REMOTE',
3070
- fullname: 'Remote Server',
3071
- color: '#6b7280', // Gray for remote/proxy connections
3072
- },
3073
- MOCKED_ECHO: {
3074
- name: 'MOCKED_ECHO',
3075
- fullname: 'Echo (Test)',
3076
- color: '#8b5cf6', // Purple for test/mock tools
3077
- },
3078
- MOCKED_FAKE: {
3079
- name: 'MOCKED_FAKE',
3080
- fullname: 'Fake LLM (Test)',
3081
- color: '#ec4899', // Pink for fake/test tools
3082
- },
3083
- VERCEL: {
3084
- name: 'VERCEL',
3085
- fullname: 'Vercel AI',
3086
- color: '#000000', // Vercel's black
3087
- },
3088
- MULTIPLE: {
3089
- name: 'MULTIPLE',
3090
- fullname: 'Multiple Providers',
3091
- color: '#6366f1', // Indigo for combined/multiple providers
3092
- },
3093
- };
3039
+ function arrayableToArray(input) {
3040
+ if (input === undefined) {
3041
+ return [];
3042
+ }
3043
+ if (input instanceof Array) {
3044
+ return input;
3045
+ }
3046
+ return [input];
3047
+ }
3048
+
3094
3049
  /**
3095
- * TODO: Refactor this - each profile must be alongside the provider definition
3096
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
3097
- * Note: [💞] Ignore a discrepancy between file name and entity name
3050
+ * Profile for Multiple providers aggregation
3098
3051
  */
3099
-
3052
+ const MULTIPLE_PROVIDER_PROFILE = {
3053
+ name: 'MULTIPLE',
3054
+ fullname: 'Multiple Providers',
3055
+ color: '#6366f1',
3056
+ };
3100
3057
  /**
3101
3058
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
3102
3059
  *
@@ -3107,12 +3064,10 @@
3107
3064
  /**
3108
3065
  * Gets array of execution tools in order of priority
3109
3066
  */
3110
- constructor(...llmExecutionTools) {
3067
+ constructor(title, ...llmExecutionTools) {
3068
+ this.title = title;
3111
3069
  this.llmExecutionTools = llmExecutionTools;
3112
3070
  }
3113
- get title() {
3114
- return 'Multiple LLM Providers';
3115
- }
3116
3071
  get description() {
3117
3072
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
3118
3073
  .map(({ title, description }, index) => {
@@ -3134,7 +3089,7 @@
3134
3089
  `);
3135
3090
  }
3136
3091
  get profile() {
3137
- return LLM_PROVIDER_PROFILES.MULTIPLE;
3092
+ return MULTIPLE_PROVIDER_PROFILE;
3138
3093
  }
3139
3094
  /**
3140
3095
  * Check the configuration of all execution tools
@@ -3198,7 +3153,7 @@
3198
3153
  return await llmExecutionTools.callEmbeddingModel(prompt);
3199
3154
  // <- case [🤖]:
3200
3155
  default:
3201
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
3156
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
3202
3157
  }
3203
3158
  }
3204
3159
  catch (error) {
@@ -3219,7 +3174,7 @@
3219
3174
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
3220
3175
  // 3) ...
3221
3176
  spaceTrim__default["default"]((block) => `
3222
- All execution tools failed:
3177
+ All execution tools of ${this.title} failed:
3223
3178
 
3224
3179
  ${block(errors
3225
3180
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3228,11 +3183,11 @@
3228
3183
  `));
3229
3184
  }
3230
3185
  else if (this.llmExecutionTools.length === 0) {
3231
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
3186
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
3232
3187
  }
3233
3188
  else {
3234
3189
  throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
3235
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
3190
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
3236
3191
 
3237
3192
  Available \`LlmExecutionTools\`:
3238
3193
  ${block(this.description)}
@@ -3262,7 +3217,7 @@
3262
3217
  *
3263
3218
  * @public exported from `@promptbook/core`
3264
3219
  */
3265
- function joinLlmExecutionTools(...llmExecutionTools) {
3220
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
3266
3221
  if (llmExecutionTools.length === 0) {
3267
3222
  const warningMessage = spaceTrim__default["default"](`
3268
3223
  You have not provided any \`LlmExecutionTools\`
@@ -3294,30 +3249,27 @@
3294
3249
  };
3295
3250
  */
3296
3251
  }
3297
- return new MultipleLlmExecutionTools(...llmExecutionTools);
3252
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
3298
3253
  }
3299
3254
  /**
3300
3255
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3301
3256
  */
3302
3257
 
3303
3258
  /**
3304
- * Takes an item or an array of items and returns an array of items
3305
- *
3306
- * 1) Any item except array and undefined returns array with that one item (also null)
3307
- * 2) Undefined returns empty array
3308
- * 3) Array returns itself
3259
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
3309
3260
  *
3310
- * @private internal utility
3261
+ * @public exported from `@promptbook/core`
3311
3262
  */
3312
- function arrayableToArray(input) {
3313
- if (input === undefined) {
3314
- return [];
3315
- }
3316
- if (input instanceof Array) {
3317
- return input;
3318
- }
3319
- return [input];
3263
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
3264
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
3265
+ const llmTools = _llms.length === 1
3266
+ ? _llms[0]
3267
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
3268
+ return llmTools;
3320
3269
  }
3270
+ /**
3271
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3272
+ */
3321
3273
 
3322
3274
  /**
3323
3275
  * Just says that the variable is not used but should be kept
@@ -4020,9 +3972,7 @@
4020
3972
  $scriptPipelineExecutionErrors: [],
4021
3973
  $failedResults: [], // Track all failed attempts
4022
3974
  };
4023
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4024
- const _llms = arrayableToArray(tools.llm);
4025
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
3975
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4026
3976
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
4027
3977
  const isJokerAttempt = attemptIndex < 0;
4028
3978
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -4542,9 +4492,7 @@
4542
4492
  return ''; // <- Note: Np knowledge present, return empty string
4543
4493
  }
4544
4494
  try {
4545
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4546
- const _llms = arrayableToArray(tools.llm);
4547
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4495
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4548
4496
  const taskEmbeddingPrompt = {
4549
4497
  title: 'Knowledge Search',
4550
4498
  modelRequirements: {
@@ -5145,13 +5093,13 @@
5145
5093
  // Calculate and update tldr based on pipeline progress
5146
5094
  const cv = newOngoingResult;
5147
5095
  // Calculate progress based on parameters resolved vs total parameters
5148
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
5096
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
5149
5097
  let resolvedParameters = 0;
5150
5098
  let currentTaskTitle = '';
5151
5099
  // Get the resolved parameters from output parameters
5152
5100
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
5153
5101
  // Count how many output parameters have non-empty values
5154
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
5102
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
5155
5103
  }
5156
5104
  // Try to determine current task from execution report
5157
5105
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -5324,8 +5272,7 @@
5324
5272
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
5325
5273
  tools,
5326
5274
  });
5327
- const _llms = arrayableToArray(tools.llm);
5328
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5275
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5329
5276
  const availableModels = (await llmTools.listModels())
5330
5277
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
5331
5278
  .map(({ modelName, modelDescription }) => ({
@@ -5369,6 +5316,7 @@
5369
5316
  };
5370
5317
  }
5371
5318
  /**
5319
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
5372
5320
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
5373
5321
  * TODO: [🏢] Check validity of `modelName` in pipeline
5374
5322
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -6224,9 +6172,7 @@
6224
6172
  if (tools === undefined || tools.llm === undefined) {
6225
6173
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
6226
6174
  }
6227
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6228
- const _llms = arrayableToArray(tools.llm);
6229
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6175
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
6230
6176
  const llmToolsWithUsage = countUsage(llmTools);
6231
6177
  // <- TODO: [🌯]
6232
6178
  /*
@@ -10581,7 +10527,7 @@
10581
10527
  * @public exported from `@promptbook/core`
10582
10528
  */
10583
10529
  function createLlmToolsFromConfiguration(configuration, options = {}) {
10584
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
10530
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
10585
10531
  const llmTools = configuration.map((llmConfiguration) => {
10586
10532
  const registeredItem = $llmToolsRegister
10587
10533
  .list()
@@ -10613,7 +10559,7 @@
10613
10559
  ...llmConfiguration.options,
10614
10560
  });
10615
10561
  });
10616
- return joinLlmExecutionTools(...llmTools);
10562
+ return joinLlmExecutionTools(title, ...llmTools);
10617
10563
  }
10618
10564
  /**
10619
10565
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -11213,7 +11159,7 @@
11213
11159
  throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
11214
11160
  }
11215
11161
  const fs = $provideFilesystemForNode();
11216
- const llm = await $provideLlmToolsFromEnv(options);
11162
+ const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
11217
11163
  const executables = await $provideExecutablesForNode();
11218
11164
  const tools = {
11219
11165
  llm,
@@ -11646,11 +11592,12 @@
11646
11592
  catch (error) {
11647
11593
  // Note: If we can't write to cache, silently ignore the error
11648
11594
  // This handles read-only filesystems, permission issues, and missing parent directories
11649
- if (error instanceof Error && (error.message.includes('EROFS') ||
11650
- error.message.includes('read-only') ||
11651
- error.message.includes('EACCES') ||
11652
- error.message.includes('EPERM') ||
11653
- error.message.includes('ENOENT'))) {
11595
+ if (error instanceof Error &&
11596
+ (error.message.includes('EROFS') ||
11597
+ error.message.includes('read-only') ||
11598
+ error.message.includes('EACCES') ||
11599
+ error.message.includes('EPERM') ||
11600
+ error.message.includes('ENOENT'))) {
11654
11601
  // Silently ignore filesystem errors - caching is optional
11655
11602
  return;
11656
11603
  }