@promptbook/remote-server 0.101.0-9 → 0.102.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. package/esm/index.es.js +58 -109
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +30 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +12 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +12 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  10. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +0 -12
  11. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
  12. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +0 -12
  13. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +0 -12
  14. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
  15. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +0 -12
  16. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +0 -12
  17. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
  18. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
  19. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
  20. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -14
  21. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -14
  22. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +0 -12
  23. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
  24. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
  25. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +0 -12
  26. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +0 -12
  27. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
  28. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  29. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  30. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
  31. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
  32. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
  33. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
  34. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
  35. package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +5 -1
  36. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
  37. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  38. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  39. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  40. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  41. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  42. package/esm/typings/src/book-components/Chat/utils/savePlugins.d.ts +55 -0
  43. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  44. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  45. package/esm/typings/src/execution/PromptResult.d.ts +2 -4
  46. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  47. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  48. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  49. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  51. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  52. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  53. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  54. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
  55. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  56. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  57. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
  58. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  59. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  60. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  61. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  62. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
  63. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
  64. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  65. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
  66. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
  67. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
  68. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
  69. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
  70. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
  71. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  72. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  73. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  74. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  75. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  76. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  77. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
  78. package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
  79. package/esm/typings/src/version.d.ts +1 -1
  80. package/package.json +2 -2
  81. package/umd/index.umd.js +58 -109
  82. package/umd/index.umd.js.map +1 -1
  83. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  84. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  85. package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
  86. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
  87. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
  88. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
  89. /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -31,7 +31,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
31
31
  * @generated
32
32
  * @see https://github.com/webgptorg/promptbook
33
33
  */
34
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-9';
34
+ const PROMPTBOOK_ENGINE_VERSION = '0.102.0-0';
35
35
  /**
36
36
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
37
37
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2878,75 +2878,32 @@ function countUsage(llmTools) {
2878
2878
  */
2879
2879
 
2880
2880
  /**
2881
- * Predefined profiles for LLM providers to maintain consistency across the application
2882
- * These profiles represent each provider as a virtual persona in chat interfaces
2881
+ * Takes an item or an array of items and returns an array of items
2883
2882
  *
2884
- * @private !!!!
2883
+ * 1) Any item except array and undefined returns array with that one item (also null)
2884
+ * 2) Undefined returns empty array
2885
+ * 3) Array returns itself
2886
+ *
2887
+ * @private internal utility
2885
2888
  */
2886
- const LLM_PROVIDER_PROFILES = {
2887
- OPENAI: {
2888
- name: 'OPENAI',
2889
- fullname: 'OpenAI GPT',
2890
- color: '#10a37f', // OpenAI's signature green
2891
- // Note: avatarSrc could be added when we have provider logos available
2892
- },
2893
- ANTHROPIC: {
2894
- name: 'ANTHROPIC',
2895
- fullname: 'Anthropic Claude',
2896
- color: '#d97706', // Anthropic's orange/amber color
2897
- },
2898
- AZURE_OPENAI: {
2899
- name: 'AZURE_OPENAI',
2900
- fullname: 'Azure OpenAI',
2901
- color: '#0078d4', // Microsoft Azure blue
2902
- },
2903
- GOOGLE: {
2904
- name: 'GOOGLE',
2905
- fullname: 'Google Gemini',
2906
- color: '#4285f4', // Google blue
2907
- },
2908
- DEEPSEEK: {
2909
- name: 'DEEPSEEK',
2910
- fullname: 'DeepSeek',
2911
- color: '#7c3aed', // Purple color for DeepSeek
2912
- },
2913
- OLLAMA: {
2914
- name: 'OLLAMA',
2915
- fullname: 'Ollama',
2916
- color: '#059669', // Emerald green for local models
2917
- },
2918
- REMOTE: {
2919
- name: 'REMOTE',
2920
- fullname: 'Remote Server',
2921
- color: '#6b7280', // Gray for remote/proxy connections
2922
- },
2923
- MOCKED_ECHO: {
2924
- name: 'MOCKED_ECHO',
2925
- fullname: 'Echo (Test)',
2926
- color: '#8b5cf6', // Purple for test/mock tools
2927
- },
2928
- MOCKED_FAKE: {
2929
- name: 'MOCKED_FAKE',
2930
- fullname: 'Fake LLM (Test)',
2931
- color: '#ec4899', // Pink for fake/test tools
2932
- },
2933
- VERCEL: {
2934
- name: 'VERCEL',
2935
- fullname: 'Vercel AI',
2936
- color: '#000000', // Vercel's black
2937
- },
2938
- MULTIPLE: {
2939
- name: 'MULTIPLE',
2940
- fullname: 'Multiple Providers',
2941
- color: '#6366f1', // Indigo for combined/multiple providers
2942
- },
2943
- };
2889
+ function arrayableToArray(input) {
2890
+ if (input === undefined) {
2891
+ return [];
2892
+ }
2893
+ if (input instanceof Array) {
2894
+ return input;
2895
+ }
2896
+ return [input];
2897
+ }
2898
+
2944
2899
  /**
2945
- * TODO: Refactor this - each profile must be alongside the provider definition
2946
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
2947
- * Note: [💞] Ignore a discrepancy between file name and entity name
2900
+ * Profile for Multiple providers aggregation
2948
2901
  */
2949
-
2902
+ const MULTIPLE_PROVIDER_PROFILE = {
2903
+ name: 'MULTIPLE',
2904
+ fullname: 'Multiple Providers',
2905
+ color: '#6366f1',
2906
+ };
2950
2907
  /**
2951
2908
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
2952
2909
  *
@@ -2957,12 +2914,10 @@ class MultipleLlmExecutionTools {
2957
2914
  /**
2958
2915
  * Gets array of execution tools in order of priority
2959
2916
  */
2960
- constructor(...llmExecutionTools) {
2917
+ constructor(title, ...llmExecutionTools) {
2918
+ this.title = title;
2961
2919
  this.llmExecutionTools = llmExecutionTools;
2962
2920
  }
2963
- get title() {
2964
- return 'Multiple LLM Providers';
2965
- }
2966
2921
  get description() {
2967
2922
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
2968
2923
  .map(({ title, description }, index) => {
@@ -2984,7 +2939,7 @@ class MultipleLlmExecutionTools {
2984
2939
  `);
2985
2940
  }
2986
2941
  get profile() {
2987
- return LLM_PROVIDER_PROFILES.MULTIPLE;
2942
+ return MULTIPLE_PROVIDER_PROFILE;
2988
2943
  }
2989
2944
  /**
2990
2945
  * Check the configuration of all execution tools
@@ -3048,7 +3003,7 @@ class MultipleLlmExecutionTools {
3048
3003
  return await llmExecutionTools.callEmbeddingModel(prompt);
3049
3004
  // <- case [🤖]:
3050
3005
  default:
3051
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
3006
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
3052
3007
  }
3053
3008
  }
3054
3009
  catch (error) {
@@ -3069,7 +3024,7 @@ class MultipleLlmExecutionTools {
3069
3024
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
3070
3025
  // 3) ...
3071
3026
  spaceTrim((block) => `
3072
- All execution tools failed:
3027
+ All execution tools of ${this.title} failed:
3073
3028
 
3074
3029
  ${block(errors
3075
3030
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3078,11 +3033,11 @@ class MultipleLlmExecutionTools {
3078
3033
  `));
3079
3034
  }
3080
3035
  else if (this.llmExecutionTools.length === 0) {
3081
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
3036
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
3082
3037
  }
3083
3038
  else {
3084
3039
  throw new PipelineExecutionError(spaceTrim((block) => `
3085
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
3040
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
3086
3041
 
3087
3042
  Available \`LlmExecutionTools\`:
3088
3043
  ${block(this.description)}
@@ -3112,7 +3067,7 @@ class MultipleLlmExecutionTools {
3112
3067
  *
3113
3068
  * @public exported from `@promptbook/core`
3114
3069
  */
3115
- function joinLlmExecutionTools(...llmExecutionTools) {
3070
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
3116
3071
  if (llmExecutionTools.length === 0) {
3117
3072
  const warningMessage = spaceTrim(`
3118
3073
  You have not provided any \`LlmExecutionTools\`
@@ -3144,30 +3099,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
3144
3099
  };
3145
3100
  */
3146
3101
  }
3147
- return new MultipleLlmExecutionTools(...llmExecutionTools);
3102
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
3148
3103
  }
3149
3104
  /**
3150
3105
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3151
3106
  */
3152
3107
 
3153
3108
  /**
3154
- * Takes an item or an array of items and returns an array of items
3109
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
3155
3110
  *
3156
- * 1) Any item except array and undefined returns array with that one item (also null)
3157
- * 2) Undefined returns empty array
3158
- * 3) Array returns itself
3159
- *
3160
- * @private internal utility
3111
+ * @public exported from `@promptbook/core`
3161
3112
  */
3162
- function arrayableToArray(input) {
3163
- if (input === undefined) {
3164
- return [];
3165
- }
3166
- if (input instanceof Array) {
3167
- return input;
3168
- }
3169
- return [input];
3113
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
3114
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
3115
+ const llmTools = _llms.length === 1
3116
+ ? _llms[0]
3117
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
3118
+ return llmTools;
3170
3119
  }
3120
+ /**
3121
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3122
+ */
3171
3123
 
3172
3124
  /**
3173
3125
  * Prepares the persona for the pipeline
@@ -3186,8 +3138,7 @@ async function preparePersona(personaDescription, tools, options) {
3186
3138
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
3187
3139
  tools,
3188
3140
  });
3189
- const _llms = arrayableToArray(tools.llm);
3190
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
3141
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
3191
3142
  const availableModels = (await llmTools.listModels())
3192
3143
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
3193
3144
  .map(({ modelName, modelDescription }) => ({
@@ -3231,6 +3182,7 @@ async function preparePersona(personaDescription, tools, options) {
3231
3182
  };
3232
3183
  }
3233
3184
  /**
3185
+ * TODO: [ðŸ˜Đ] DRY `preparePersona` and `selectBestModelFromAvailable`
3234
3186
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
3235
3187
  * TODO: [ðŸĒ] Check validity of `modelName` in pipeline
3236
3188
  * TODO: [ðŸĒ] Check validity of `systemMessage` in pipeline
@@ -4349,9 +4301,7 @@ async function preparePipeline(pipeline, tools, options) {
4349
4301
  if (tools === undefined || tools.llm === undefined) {
4350
4302
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
4351
4303
  }
4352
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4353
- const _llms = arrayableToArray(tools.llm);
4354
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4304
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4355
4305
  const llmToolsWithUsage = countUsage(llmTools);
4356
4306
  // <- TODO: [ðŸŒŊ]
4357
4307
  /*
@@ -5511,9 +5461,7 @@ async function executeAttempts(options) {
5511
5461
  $scriptPipelineExecutionErrors: [],
5512
5462
  $failedResults: [], // Track all failed attempts
5513
5463
  };
5514
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5515
- const _llms = arrayableToArray(tools.llm);
5516
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5464
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5517
5465
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
5518
5466
  const isJokerAttempt = attemptIndex < 0;
5519
5467
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -6033,9 +5981,7 @@ async function getKnowledgeForTask(options) {
6033
5981
  return ''; // <- Note: Np knowledge present, return empty string
6034
5982
  }
6035
5983
  try {
6036
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6037
- const _llms = arrayableToArray(tools.llm);
6038
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5984
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
6039
5985
  const taskEmbeddingPrompt = {
6040
5986
  title: 'Knowledge Search',
6041
5987
  modelRequirements: {
@@ -6636,13 +6582,13 @@ function createPipelineExecutor(options) {
6636
6582
  // Calculate and update tldr based on pipeline progress
6637
6583
  const cv = newOngoingResult;
6638
6584
  // Calculate progress based on parameters resolved vs total parameters
6639
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
6585
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
6640
6586
  let resolvedParameters = 0;
6641
6587
  let currentTaskTitle = '';
6642
6588
  // Get the resolved parameters from output parameters
6643
6589
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
6644
6590
  // Count how many output parameters have non-empty values
6645
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
6591
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
6646
6592
  }
6647
6593
  // Try to determine current task from execution report
6648
6594
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6898,7 +6844,7 @@ function $registeredLlmToolsMessage() {
6898
6844
  * @public exported from `@promptbook/core`
6899
6845
  */
6900
6846
  function createLlmToolsFromConfiguration(configuration, options = {}) {
6901
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
6847
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
6902
6848
  const llmTools = configuration.map((llmConfiguration) => {
6903
6849
  const registeredItem = $llmToolsRegister
6904
6850
  .list()
@@ -6930,7 +6876,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
6930
6876
  ...llmConfiguration.options,
6931
6877
  });
6932
6878
  });
6933
- return joinLlmExecutionTools(...llmTools);
6879
+ return joinLlmExecutionTools(title, ...llmTools);
6934
6880
  }
6935
6881
  /**
6936
6882
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -8179,8 +8125,11 @@ function startRemoteServer(options) {
8179
8125
  if (isAnonymous === true) {
8180
8126
  // Note: Anonymous mode
8181
8127
  // TODO: Maybe check that configuration is not empty
8182
- const { llmToolsConfiguration } = identification;
8183
- llm = createLlmToolsFromConfiguration(llmToolsConfiguration, { isVerbose });
8128
+ const { userId, llmToolsConfiguration } = identification;
8129
+ llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
8130
+ title: `LLM Tools for anonymous user "${userId}" on server`,
8131
+ isVerbose,
8132
+ });
8184
8133
  }
8185
8134
  else if (isAnonymous === false && createLlmExecutionTools !== null) {
8186
8135
  // Note: Application mode
@@ -8214,7 +8163,7 @@ function startRemoteServer(options) {
8214
8163
  });
8215
8164
  // Note: OpenAI-compatible chat completions endpoint
8216
8165
  app.post('/v1/chat/completions', async (request, response) => {
8217
- // TODO: !!!! Make more promptbook-native:
8166
+ // TODO: [🧠][ðŸĶĒ] Make OpenAI compatible more promptbook-native - make reverse adapter from LlmExecutionTools to OpenAI-compatible:
8218
8167
  try {
8219
8168
  const params = request.body;
8220
8169
  const { model, messages } = params;