@promptbook/node 0.101.0-8 → 0.101.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. package/README.md +0 -4
  2. package/esm/index.es.js +59 -112
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +14 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +12 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +8 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
  8. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +2 -8
  12. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
  13. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +2 -8
  14. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +2 -8
  15. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
  16. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +2 -8
  17. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +2 -8
  18. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
  19. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
  20. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
  21. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +25 -10
  22. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -8
  23. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +2 -8
  24. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
  25. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
  26. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +2 -8
  27. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +2 -8
  28. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
  29. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  30. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  31. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
  32. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
  33. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
  34. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
  35. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
  36. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
  37. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  38. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  39. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  40. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  41. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  42. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  43. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  44. package/esm/typings/src/execution/PromptResult.d.ts +2 -4
  45. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  46. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  47. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  48. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  49. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  51. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  52. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  53. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
  54. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  55. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  56. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
  57. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  58. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  59. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  60. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  61. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
  62. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
  63. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  64. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
  65. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
  66. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
  67. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
  68. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
  69. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
  70. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  71. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  72. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  73. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  74. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  75. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  76. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
  77. package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
  78. package/esm/typings/src/version.d.ts +1 -1
  79. package/package.json +2 -2
  80. package/umd/index.umd.js +59 -112
  81. package/umd/index.umd.js.map +1 -1
  82. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  83. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  84. package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
  85. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
  86. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
  87. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
  88. package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/README.md CHANGED
@@ -29,10 +29,6 @@ Write AI applications using plain human language across multiple models and plat
 
 
 
- <blockquote style="color: #ff8811">
- <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
- </blockquote>
-
  ## 📦 Package `@promptbook/node`
 
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -28,7 +28,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-8';
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3011,75 +3011,32 @@ function mapAvailableToExpectedParameters(options) {
  }
 
  /**
- * Predefined profiles for LLM providers to maintain consistency across the application
- * These profiles represent each provider as a virtual persona in chat interfaces
+ * Takes an item or an array of items and returns an array of items
  *
- * @private !!!!
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
  */
- const LLM_PROVIDER_PROFILES = {
- OPENAI: {
- name: 'OPENAI',
- fullname: 'OpenAI GPT',
- color: '#10a37f', // OpenAI's signature green
- // Note: avatarSrc could be added when we have provider logos available
- },
- ANTHROPIC: {
- name: 'ANTHROPIC',
- fullname: 'Anthropic Claude',
- color: '#d97706', // Anthropic's orange/amber color
- },
- AZURE_OPENAI: {
- name: 'AZURE_OPENAI',
- fullname: 'Azure OpenAI',
- color: '#0078d4', // Microsoft Azure blue
- },
- GOOGLE: {
- name: 'GOOGLE',
- fullname: 'Google Gemini',
- color: '#4285f4', // Google blue
- },
- DEEPSEEK: {
- name: 'DEEPSEEK',
- fullname: 'DeepSeek',
- color: '#7c3aed', // Purple color for DeepSeek
- },
- OLLAMA: {
- name: 'OLLAMA',
- fullname: 'Ollama',
- color: '#059669', // Emerald green for local models
- },
- REMOTE: {
- name: 'REMOTE',
- fullname: 'Remote Server',
- color: '#6b7280', // Gray for remote/proxy connections
- },
- MOCKED_ECHO: {
- name: 'MOCKED_ECHO',
- fullname: 'Echo (Test)',
- color: '#8b5cf6', // Purple for test/mock tools
- },
- MOCKED_FAKE: {
- name: 'MOCKED_FAKE',
- fullname: 'Fake LLM (Test)',
- color: '#ec4899', // Pink for fake/test tools
- },
- VERCEL: {
- name: 'VERCEL',
- fullname: 'Vercel AI',
- color: '#000000', // Vercel's black
- },
- MULTIPLE: {
- name: 'MULTIPLE',
- fullname: 'Multiple Providers',
- color: '#6366f1', // Indigo for combined/multiple providers
- },
- };
+ function arrayableToArray(input) {
+ if (input === undefined) {
+ return [];
+ }
+ if (input instanceof Array) {
+ return input;
+ }
+ return [input];
+ }
+
  /**
- * TODO: Refactor this - each profile must be alongside the provider definition
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
- * Note: [💞] Ignore a discrepancy between file name and entity name
+ * Profile for Multiple providers aggregation
  */
-
+ const MULTIPLE_PROVIDER_PROFILE = {
+ name: 'MULTIPLE',
+ fullname: 'Multiple Providers',
+ color: '#6366f1',
+ };
  /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
  *
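The relocated `arrayableToArray` helper behaves exactly as its JSDoc enumerates. A minimal re-statement for illustration only (the helper itself stays private to the package):

```ts
// Illustration only: re-states the documented behavior of the private `arrayableToArray` helper.
function arrayableToArray<TItem>(input: undefined | TItem | TItem[]): TItem[] {
    if (input === undefined) {
        return []; // case 2: undefined becomes an empty array
    }
    if (Array.isArray(input)) {
        return input; // case 3: an array is returned as-is
    }
    return [input]; // case 1: any other value (including null) is wrapped in an array
}

console.log(arrayableToArray<string>(undefined)); // []
console.log(arrayableToArray('gpt-4o'));          // [ 'gpt-4o' ]
console.log(arrayableToArray(['a', 'b']));        // [ 'a', 'b' ]
```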
@@ -3090,12 +3047,10 @@ class MultipleLlmExecutionTools {
  /**
  * Gets array of execution tools in order of priority
  */
- constructor(...llmExecutionTools) {
+ constructor(title, ...llmExecutionTools) {
+ this.title = title;
  this.llmExecutionTools = llmExecutionTools;
  }
- get title() {
- return 'Multiple LLM Providers';
- }
  get description() {
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
  .map(({ title, description }, index) => {
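Since the hard-coded `title` getter is gone, code that constructs `MultipleLlmExecutionTools` directly now has to pass a title as the first constructor argument. A hedged before/after sketch; the import paths and provider instances are assumptions, not part of this diff:

```ts
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed type location
import { MultipleLlmExecutionTools } from '@promptbook/core'; // <- assumed export location

declare const openAiTools: LlmExecutionTools; // placeholder provider
declare const claudeTools: LlmExecutionTools; // placeholder provider

// Before (0.101.0-8): the title was a fixed getter returning 'Multiple LLM Providers'.
// const multi = new MultipleLlmExecutionTools(openAiTools, claudeTools);

// After (0.101.0): the caller supplies the title, and it is echoed in aggregated error messages.
const multi = new MultipleLlmExecutionTools('My aggregated LLM providers', openAiTools, claudeTools);
```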
@@ -3117,7 +3072,7 @@ class MultipleLlmExecutionTools {
  `);
  }
  get profile() {
- return LLM_PROVIDER_PROFILES.MULTIPLE;
+ return MULTIPLE_PROVIDER_PROFILE;
  }
  /**
  * Check the configuration of all execution tools
@@ -3181,7 +3136,7 @@ class MultipleLlmExecutionTools {
  return await llmExecutionTools.callEmbeddingModel(prompt);
  // <- case [🤖]:
  default:
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
  }
  }
  catch (error) {
@@ -3202,7 +3157,7 @@ class MultipleLlmExecutionTools {
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
  // 3) ...
  spaceTrim((block) => `
- All execution tools failed:
+ All execution tools of ${this.title} failed:
 
  ${block(errors
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3211,11 +3166,11 @@ class MultipleLlmExecutionTools {
  `));
  }
  else if (this.llmExecutionTools.length === 0) {
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
  }
  else {
  throw new PipelineExecutionError(spaceTrim((block) => `
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
  Available \`LlmExecutionTools\`:
  ${block(this.description)}
@@ -3245,7 +3200,7 @@ class MultipleLlmExecutionTools {
  *
  * @public exported from `@promptbook/core`
  */
- function joinLlmExecutionTools(...llmExecutionTools) {
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
  if (llmExecutionTools.length === 0) {
  const warningMessage = spaceTrim(`
  You have not provided any \`LlmExecutionTools\`
@@ -3277,30 +3232,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
  };
  */
  }
- return new MultipleLlmExecutionTools(...llmExecutionTools);
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
  }
  /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
  /**
- * Takes an item or an array of items and returns an array of items
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @private internal utility
+ * @public exported from `@promptbook/core`
  */
- function arrayableToArray(input) {
- if (input === undefined) {
- return [];
- }
- if (input instanceof Array) {
- return input;
- }
- return [input];
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+ const llmTools = _llms.length === 1
+ ? _llms[0]
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+ return llmTools;
  }
+ /**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
 
  /**
  * Just says that the variable is not used but should be kept
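The new `getSingleLlmExecutionTools` centralizes the "one or many LLM tools" normalization that the following hunks delete from `executeAttempts`, `getKnowledgeForTask`, `preparePersona`, and `preparePipeline`. A short usage sketch; both helpers are documented as `@public exported from @promptbook/core`, while the type import path and the provider instances are placeholders:

```ts
import type { LlmExecutionTools } from '@promptbook/types'; // <- assumed type location
import { getSingleLlmExecutionTools, joinLlmExecutionTools } from '@promptbook/core';

declare const openAiTools: LlmExecutionTools; // placeholder provider
declare const ollamaTools: LlmExecutionTools; // placeholder provider

// A single tool is returned unchanged:
const single = getSingleLlmExecutionTools(openAiTools);

// An array is joined into one `MultipleLlmExecutionTools` with an auto-generated title:
const combined = getSingleLlmExecutionTools([openAiTools, ollamaTools]);

// `joinLlmExecutionTools` now takes the title as its first argument:
const fallbackChain = joinLlmExecutionTools('OpenAI with Ollama fallback', openAiTools, ollamaTools);
```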
@@ -4003,9 +3955,7 @@ async function executeAttempts(options) {
  $scriptPipelineExecutionErrors: [],
  $failedResults: [], // Track all failed attempts
  };
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
  const isJokerAttempt = attemptIndex < 0;
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -4525,9 +4475,7 @@ async function getKnowledgeForTask(options) {
  return ''; // <- Note: Np knowledge present, return empty string
  }
  try {
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const taskEmbeddingPrompt = {
  title: 'Knowledge Search',
  modelRequirements: {
@@ -5128,13 +5076,13 @@ function createPipelineExecutor(options) {
  // Calculate and update tldr based on pipeline progress
  const cv = newOngoingResult;
  // Calculate progress based on parameters resolved vs total parameters
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
  let resolvedParameters = 0;
  let currentTaskTitle = '';
  // Get the resolved parameters from output parameters
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
  // Count how many output parameters have non-empty values
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
  }
  // Try to determine current task from execution report
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -5307,8 +5255,7 @@ async function preparePersona(personaDescription, tools, options) {
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
  tools,
  });
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const availableModels = (await llmTools.listModels())
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
  .map(({ modelName, modelDescription }) => ({
@@ -5352,6 +5299,7 @@ async function preparePersona(personaDescription, tools, options) {
  };
  }
  /**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -6207,9 +6155,7 @@ async function preparePipeline(pipeline, tools, options) {
  if (tools === undefined || tools.llm === undefined) {
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
  }
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const llmToolsWithUsage = countUsage(llmTools);
  // <- TODO: [🌯]
  /*
@@ -10564,7 +10510,7 @@ const $isRunningInWebWorker = new Function(`
  * @public exported from `@promptbook/core`
  */
  function createLlmToolsFromConfiguration(configuration, options = {}) {
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
  const llmTools = configuration.map((llmConfiguration) => {
  const registeredItem = $llmToolsRegister
  .list()
@@ -10596,7 +10542,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
  ...llmConfiguration.options,
  });
  });
- return joinLlmExecutionTools(...llmTools);
+ return joinLlmExecutionTools(title, ...llmTools);
  }
  /**
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
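`createLlmToolsFromConfiguration` gains an optional `title` (default `'LLM Tools from Configuration'`) and forwards it to `joinLlmExecutionTools`, so the label surfaces in the joined tools' aggregated error messages. A sketch, assuming the configuration array is obtained elsewhere since its shape is not shown in this diff:

```ts
import { createLlmToolsFromConfiguration } from '@promptbook/core';

// Obtained elsewhere (e.g. from environment or user settings); its shape is not shown in this diff.
declare const configuration: Parameters<typeof createLlmToolsFromConfiguration>[0];

const llmTools = createLlmToolsFromConfiguration(configuration, {
    title: 'LLM tools for the staging environment',
    isVerbose: true,
});
```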
@@ -11196,7 +11142,7 @@ async function $provideExecutionToolsForNode(options) {
  throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
  }
  const fs = $provideFilesystemForNode();
- const llm = await $provideLlmToolsFromEnv(options);
+ const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
  const executables = await $provideExecutablesForNode();
  const tools = {
  llm,
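`$provideExecutionToolsForNode` now labels its LLM tools `'LLM Tools for Node.js'` by default; because the caller's options are spread after that default, a caller-supplied `title` should still win. A speculative sketch; the import path and the exact options type are assumptions:

```ts
import { $provideExecutionToolsForNode } from '@promptbook/node'; // <- assumed export location

async function main() {
    // Default label for the LLM tools: 'LLM Tools for Node.js'
    const tools = await $provideExecutionToolsForNode();

    // The `{ title: ..., ...options }` spread means an explicit title overrides the default:
    const cliTools = await $provideExecutionToolsForNode({ title: 'CLI LLM tools' });

    return { tools, cliTools };
}

main().catch(console.error);
```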
@@ -11629,11 +11575,12 @@ class FileCacheStorage {
  catch (error) {
  // Note: If we can't write to cache, silently ignore the error
  // This handles read-only filesystems, permission issues, and missing parent directories
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) {
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) {
  // Silently ignore filesystem errors - caching is optional
  return;
  }