@promptbook/remote-server 0.101.0-9 → 0.102.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. package/esm/index.es.js +58 -109
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +30 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +12 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +12 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +11 -4
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  10. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +0 -12
  11. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +0 -24
  12. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +0 -12
  13. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +0 -12
  14. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +0 -6
  15. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +0 -12
  16. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +0 -12
  17. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +0 -6
  18. package/esm/typings/src/book-2.0/commitments/META_IMAGE/META_IMAGE.d.ts +0 -6
  19. package/esm/typings/src/book-2.0/commitments/META_LINK/META_LINK.d.ts +0 -6
  20. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +23 -14
  21. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +2 -14
  22. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +0 -12
  23. package/esm/typings/src/book-2.0/commitments/RULE/RULE.d.ts +0 -12
  24. package/esm/typings/src/book-2.0/commitments/SAMPLE/SAMPLE.d.ts +0 -12
  25. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +0 -12
  26. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +0 -12
  27. package/esm/typings/src/book-2.0/commitments/_base/createEmptyAgentModelRequirements.d.ts +1 -1
  28. package/esm/typings/src/book-2.0/commitments/index.d.ts +1 -1
  29. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  30. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +5 -2
  31. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/AvatarProfile.d.ts +3 -0
  32. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +18 -1
  33. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +8 -0
  34. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +2 -15
  35. package/esm/typings/src/book-components/Chat/Chat/Chat.d.ts +5 -1
  36. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +9 -0
  37. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  38. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  39. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  40. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  41. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  42. package/esm/typings/src/book-components/Chat/utils/savePlugins.d.ts +55 -0
  43. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  44. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  45. package/esm/typings/src/execution/PromptResult.d.ts +2 -4
  46. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  47. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  48. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  49. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  50. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  51. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  52. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  53. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  54. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +7 -18
  55. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  56. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  57. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +58 -0
  58. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  59. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  60. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  61. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  62. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +3 -8
  63. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +4 -5
  64. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  65. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +4 -10
  66. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +4 -6
  67. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +3 -3
  68. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +16 -8
  69. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -8
  70. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +5 -14
  71. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  72. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  73. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  74. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  75. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  76. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  77. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +0 -1
  78. package/esm/typings/src/utils/markdown/promptbookifyAiText.d.ts +2 -2
  79. package/esm/typings/src/version.d.ts +1 -1
  80. package/package.json +2 -2
  81. package/umd/index.umd.js +58 -109
  82. package/umd/index.umd.js.map +1 -1
  83. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  84. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  85. package/esm/typings/src/book-components/Chat/examples/ChatMarkdownDemo.d.ts +0 -16
  86. package/esm/typings/src/expectations/drafts/isDomainNameFree.d.ts +0 -10
  87. package/esm/typings/src/expectations/drafts/isGithubNameFree.d.ts +0 -10
  88. package/esm/typings/src/llm-providers/_common/profiles/llmProviderProfiles.d.ts +0 -81
  89. /package/esm/typings/src/llm-providers/_common/{profiles/test/llmProviderProfiles.test.d.ts → utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
@@ -2,11 +2,11 @@ import { string_markdown } from '../../types/typeAliases';
2
2
  /**
3
3
  * Function `promptbookifyAiText` will slightly modify the text so we know it was processed by Promptbook
4
4
  *
5
+ * Note: [🔂] This function is idempotent.
6
+ *
5
7
  * @public exported from `@promptbook/markdown-utils`
6
8
  */
7
9
  export declare function promptbookifyAiText(text: string_markdown): string_markdown;
8
10
  /**
9
- * TODO: !!!!! Make the function idempotent and add "Note: [🔂] This function is idempotent."
10
- * TODO: [🅾️]!!! Use this across the project where AI text is involved
11
11
  * TODO: [🧠][✌ïļ] Make some Promptbook-native token system
12
12
  */
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.101.0-8`).
18
+ * It follows semantic versioning (e.g., `0.101.0`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/remote-server",
3
- "version": "0.101.0-9",
3
+ "version": "0.102.0-0",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -95,7 +95,7 @@
95
95
  "module": "./esm/index.es.js",
96
96
  "typings": "./esm/typings/src/_packages/remote-server.index.d.ts",
97
97
  "peerDependencies": {
98
- "@promptbook/core": "0.101.0-9"
98
+ "@promptbook/core": "0.102.0-0"
99
99
  },
100
100
  "dependencies": {
101
101
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -47,7 +47,7 @@
47
47
  * @generated
48
48
  * @see https://github.com/webgptorg/promptbook
49
49
  */
50
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-9';
50
+ const PROMPTBOOK_ENGINE_VERSION = '0.102.0-0';
51
51
  /**
52
52
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
53
53
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2894,75 +2894,32 @@
2894
2894
  */
2895
2895
 
2896
2896
  /**
2897
- * Predefined profiles for LLM providers to maintain consistency across the application
2898
- * These profiles represent each provider as a virtual persona in chat interfaces
2897
+ * Takes an item or an array of items and returns an array of items
2899
2898
  *
2900
- * @private !!!!
2899
+ * 1) Any item except array and undefined returns array with that one item (also null)
2900
+ * 2) Undefined returns empty array
2901
+ * 3) Array returns itself
2902
+ *
2903
+ * @private internal utility
2901
2904
  */
2902
- const LLM_PROVIDER_PROFILES = {
2903
- OPENAI: {
2904
- name: 'OPENAI',
2905
- fullname: 'OpenAI GPT',
2906
- color: '#10a37f', // OpenAI's signature green
2907
- // Note: avatarSrc could be added when we have provider logos available
2908
- },
2909
- ANTHROPIC: {
2910
- name: 'ANTHROPIC',
2911
- fullname: 'Anthropic Claude',
2912
- color: '#d97706', // Anthropic's orange/amber color
2913
- },
2914
- AZURE_OPENAI: {
2915
- name: 'AZURE_OPENAI',
2916
- fullname: 'Azure OpenAI',
2917
- color: '#0078d4', // Microsoft Azure blue
2918
- },
2919
- GOOGLE: {
2920
- name: 'GOOGLE',
2921
- fullname: 'Google Gemini',
2922
- color: '#4285f4', // Google blue
2923
- },
2924
- DEEPSEEK: {
2925
- name: 'DEEPSEEK',
2926
- fullname: 'DeepSeek',
2927
- color: '#7c3aed', // Purple color for DeepSeek
2928
- },
2929
- OLLAMA: {
2930
- name: 'OLLAMA',
2931
- fullname: 'Ollama',
2932
- color: '#059669', // Emerald green for local models
2933
- },
2934
- REMOTE: {
2935
- name: 'REMOTE',
2936
- fullname: 'Remote Server',
2937
- color: '#6b7280', // Gray for remote/proxy connections
2938
- },
2939
- MOCKED_ECHO: {
2940
- name: 'MOCKED_ECHO',
2941
- fullname: 'Echo (Test)',
2942
- color: '#8b5cf6', // Purple for test/mock tools
2943
- },
2944
- MOCKED_FAKE: {
2945
- name: 'MOCKED_FAKE',
2946
- fullname: 'Fake LLM (Test)',
2947
- color: '#ec4899', // Pink for fake/test tools
2948
- },
2949
- VERCEL: {
2950
- name: 'VERCEL',
2951
- fullname: 'Vercel AI',
2952
- color: '#000000', // Vercel's black
2953
- },
2954
- MULTIPLE: {
2955
- name: 'MULTIPLE',
2956
- fullname: 'Multiple Providers',
2957
- color: '#6366f1', // Indigo for combined/multiple providers
2958
- },
2959
- };
2905
+ function arrayableToArray(input) {
2906
+ if (input === undefined) {
2907
+ return [];
2908
+ }
2909
+ if (input instanceof Array) {
2910
+ return input;
2911
+ }
2912
+ return [input];
2913
+ }
2914
+
2960
2915
  /**
2961
- * TODO: Refactor this - each profile must be alongside the provider definition
2962
- * TODO: [🕛] Unite `AgentBasicInformation`, `ChatParticipant`, `LlmExecutionTools` + `LlmToolsMetadata`
2963
- * Note: [💞] Ignore a discrepancy between file name and entity name
2916
+ * Profile for Multiple providers aggregation
2964
2917
  */
2965
-
2918
+ const MULTIPLE_PROVIDER_PROFILE = {
2919
+ name: 'MULTIPLE',
2920
+ fullname: 'Multiple Providers',
2921
+ color: '#6366f1',
2922
+ };
2966
2923
  /**
2967
2924
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
2968
2925
  *
@@ -2973,12 +2930,10 @@
2973
2930
  /**
2974
2931
  * Gets array of execution tools in order of priority
2975
2932
  */
2976
- constructor(...llmExecutionTools) {
2933
+ constructor(title, ...llmExecutionTools) {
2934
+ this.title = title;
2977
2935
  this.llmExecutionTools = llmExecutionTools;
2978
2936
  }
2979
- get title() {
2980
- return 'Multiple LLM Providers';
2981
- }
2982
2937
  get description() {
2983
2938
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
2984
2939
  .map(({ title, description }, index) => {
@@ -3000,7 +2955,7 @@
3000
2955
  `);
3001
2956
  }
3002
2957
  get profile() {
3003
- return LLM_PROVIDER_PROFILES.MULTIPLE;
2958
+ return MULTIPLE_PROVIDER_PROFILE;
3004
2959
  }
3005
2960
  /**
3006
2961
  * Check the configuration of all execution tools
@@ -3064,7 +3019,7 @@
3064
3019
  return await llmExecutionTools.callEmbeddingModel(prompt);
3065
3020
  // <- case [🤖]:
3066
3021
  default:
3067
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
3022
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
3068
3023
  }
3069
3024
  }
3070
3025
  catch (error) {
@@ -3085,7 +3040,7 @@
3085
3040
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
3086
3041
  // 3) ...
3087
3042
  spaceTrim__default["default"]((block) => `
3088
- All execution tools failed:
3043
+ All execution tools of ${this.title} failed:
3089
3044
 
3090
3045
  ${block(errors
3091
3046
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3094,11 +3049,11 @@
3094
3049
  `));
3095
3050
  }
3096
3051
  else if (this.llmExecutionTools.length === 0) {
3097
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
3052
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
3098
3053
  }
3099
3054
  else {
3100
3055
  throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
3101
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
3056
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
3102
3057
 
3103
3058
  Available \`LlmExecutionTools\`:
3104
3059
  ${block(this.description)}
@@ -3128,7 +3083,7 @@
3128
3083
  *
3129
3084
  * @public exported from `@promptbook/core`
3130
3085
  */
3131
- function joinLlmExecutionTools(...llmExecutionTools) {
3086
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
3132
3087
  if (llmExecutionTools.length === 0) {
3133
3088
  const warningMessage = spaceTrim__default["default"](`
3134
3089
  You have not provided any \`LlmExecutionTools\`
@@ -3160,30 +3115,27 @@
3160
3115
  };
3161
3116
  */
3162
3117
  }
3163
- return new MultipleLlmExecutionTools(...llmExecutionTools);
3118
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
3164
3119
  }
3165
3120
  /**
3166
3121
  * TODO: [👷‍♂ïļ] @@@ Manual about construction of llmTools
3167
3122
  */
3168
3123
 
3169
3124
  /**
3170
- * Takes an item or an array of items and returns an array of items
3125
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
3171
3126
  *
3172
- * 1) Any item except array and undefined returns array with that one item (also null)
3173
- * 2) Undefined returns empty array
3174
- * 3) Array returns itself
3175
- *
3176
- * @private internal utility
3127
+ * @public exported from `@promptbook/core`
3177
3128
  */
3178
- function arrayableToArray(input) {
3179
- if (input === undefined) {
3180
- return [];
3181
- }
3182
- if (input instanceof Array) {
3183
- return input;
3184
- }
3185
- return [input];
3129
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
3130
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
3131
+ const llmTools = _llms.length === 1
3132
+ ? _llms[0]
3133
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
3134
+ return llmTools;
3186
3135
  }
3136
+ /**
3137
+ * TODO: [👷‍♂ïļ] @@@ Manual about construction of llmTools
3138
+ */
3187
3139
 
3188
3140
  /**
3189
3141
  * Prepares the persona for the pipeline
@@ -3202,8 +3154,7 @@
3202
3154
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
3203
3155
  tools,
3204
3156
  });
3205
- const _llms = arrayableToArray(tools.llm);
3206
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
3157
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
3207
3158
  const availableModels = (await llmTools.listModels())
3208
3159
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
3209
3160
  .map(({ modelName, modelDescription }) => ({
@@ -3247,6 +3198,7 @@
3247
3198
  };
3248
3199
  }
3249
3200
  /**
3201
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
3250
3202
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
3251
3203
  * TODO: [ðŸĒ] Check validity of `modelName` in pipeline
3252
3204
  * TODO: [ðŸĒ] Check validity of `systemMessage` in pipeline
@@ -4365,9 +4317,7 @@
4365
4317
  if (tools === undefined || tools.llm === undefined) {
4366
4318
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
4367
4319
  }
4368
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4369
- const _llms = arrayableToArray(tools.llm);
4370
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4320
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4371
4321
  const llmToolsWithUsage = countUsage(llmTools);
4372
4322
  // <- TODO: [🌯]
4373
4323
  /*
@@ -5527,9 +5477,7 @@
5527
5477
  $scriptPipelineExecutionErrors: [],
5528
5478
  $failedResults: [], // Track all failed attempts
5529
5479
  };
5530
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5531
- const _llms = arrayableToArray(tools.llm);
5532
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5480
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5533
5481
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
5534
5482
  const isJokerAttempt = attemptIndex < 0;
5535
5483
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -6049,9 +5997,7 @@
6049
5997
  return ''; // <- Note: No knowledge present, return empty string
6050
5998
  }
6051
5999
  try {
6052
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6053
- const _llms = arrayableToArray(tools.llm);
6054
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6000
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
6055
6001
  const taskEmbeddingPrompt = {
6056
6002
  title: 'Knowledge Search',
6057
6003
  modelRequirements: {
@@ -6652,13 +6598,13 @@
6652
6598
  // Calculate and update tldr based on pipeline progress
6653
6599
  const cv = newOngoingResult;
6654
6600
  // Calculate progress based on parameters resolved vs total parameters
6655
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
6601
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
6656
6602
  let resolvedParameters = 0;
6657
6603
  let currentTaskTitle = '';
6658
6604
  // Get the resolved parameters from output parameters
6659
6605
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
6660
6606
  // Count how many output parameters have non-empty values
6661
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
6607
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
6662
6608
  }
6663
6609
  // Try to determine current task from execution report
6664
6610
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6914,7 +6860,7 @@
6914
6860
  * @public exported from `@promptbook/core`
6915
6861
  */
6916
6862
  function createLlmToolsFromConfiguration(configuration, options = {}) {
6917
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
6863
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
6918
6864
  const llmTools = configuration.map((llmConfiguration) => {
6919
6865
  const registeredItem = $llmToolsRegister
6920
6866
  .list()
@@ -6946,7 +6892,7 @@
6946
6892
  ...llmConfiguration.options,
6947
6893
  });
6948
6894
  });
6949
- return joinLlmExecutionTools(...llmTools);
6895
+ return joinLlmExecutionTools(title, ...llmTools);
6950
6896
  }
6951
6897
  /**
6952
6898
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -8195,8 +8141,11 @@
8195
8141
  if (isAnonymous === true) {
8196
8142
  // Note: Anonymous mode
8197
8143
  // TODO: Maybe check that configuration is not empty
8198
- const { llmToolsConfiguration } = identification;
8199
- llm = createLlmToolsFromConfiguration(llmToolsConfiguration, { isVerbose });
8144
+ const { userId, llmToolsConfiguration } = identification;
8145
+ llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
8146
+ title: `LLM Tools for anonymous user "${userId}" on server`,
8147
+ isVerbose,
8148
+ });
8200
8149
  }
8201
8150
  else if (isAnonymous === false && createLlmExecutionTools !== null) {
8202
8151
  // Note: Application mode
@@ -8230,7 +8179,7 @@
8230
8179
  });
8231
8180
  // Note: OpenAI-compatible chat completions endpoint
8232
8181
  app.post('/v1/chat/completions', async (request, response) => {
8233
- // TODO: !!!! Make more promptbook-native:
8182
+ // TODO: [🧠][🦢] Make OpenAI compatible more promptbook-native - make reverse adapter from LlmExecutionTools to OpenAI-compatible:
8234
8183
  try {
8235
8184
  const params = request.body;
8236
8185
  const { model, messages } = params;