@promptbook/remote-server 0.101.0-17 → 0.101.0-19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/esm/index.es.js +50 -40
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +6 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  6. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  7. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +7 -1
  8. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  9. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  10. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  11. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  12. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  13. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  14. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  15. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  16. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  17. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  18. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  19. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  20. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  21. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  22. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  23. package/esm/typings/src/version.d.ts +1 -1
  24. package/package.json +2 -2
  25. package/umd/index.umd.js +50 -40
  26. package/umd/index.umd.js.map +1 -1
  27. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  28. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  29. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  30. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  31. /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/index.es.js CHANGED
@@ -31,7 +31,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
31
31
  * @generated
32
32
  * @see https://github.com/webgptorg/promptbook
33
33
  */
34
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-17';
34
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-19';
35
35
  /**
36
36
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
37
37
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2877,6 +2877,25 @@ function countUsage(llmTools) {
2877
2877
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
2878
2878
  */
2879
2879
 
2880
+ /**
2881
+ * Takes an item or an array of items and returns an array of items
2882
+ *
2883
+ * 1) Any item except array and undefined returns array with that one item (also null)
2884
+ * 2) Undefined returns empty array
2885
+ * 3) Array returns itself
2886
+ *
2887
+ * @private internal utility
2888
+ */
2889
+ function arrayableToArray(input) {
2890
+ if (input === undefined) {
2891
+ return [];
2892
+ }
2893
+ if (input instanceof Array) {
2894
+ return input;
2895
+ }
2896
+ return [input];
2897
+ }
2898
+
2880
2899
  /**
2881
2900
  * Predefined profiles for LLM providers to maintain consistency across the application
2882
2901
  * These profiles represent each provider as a virtual persona in chat interfaces
@@ -2957,12 +2976,10 @@ class MultipleLlmExecutionTools {
2957
2976
  /**
2958
2977
  * Gets array of execution tools in order of priority
2959
2978
  */
2960
- constructor(...llmExecutionTools) {
2979
+ constructor(title, ...llmExecutionTools) {
2980
+ this.title = title;
2961
2981
  this.llmExecutionTools = llmExecutionTools;
2962
2982
  }
2963
- get title() {
2964
- return 'Multiple LLM Providers';
2965
- }
2966
2983
  get description() {
2967
2984
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
2968
2985
  .map(({ title, description }, index) => {
@@ -3048,7 +3065,7 @@ class MultipleLlmExecutionTools {
3048
3065
  return await llmExecutionTools.callEmbeddingModel(prompt);
3049
3066
  // <- case [🤖]:
3050
3067
  default:
3051
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
3068
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
3052
3069
  }
3053
3070
  }
3054
3071
  catch (error) {
@@ -3069,7 +3086,7 @@ class MultipleLlmExecutionTools {
3069
3086
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
3070
3087
  // 3) ...
3071
3088
  spaceTrim((block) => `
3072
- All execution tools failed:
3089
+ All execution tools of ${this.title} failed:
3073
3090
 
3074
3091
  ${block(errors
3075
3092
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3078,11 +3095,11 @@ class MultipleLlmExecutionTools {
3078
3095
  `));
3079
3096
  }
3080
3097
  else if (this.llmExecutionTools.length === 0) {
3081
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
3098
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
3082
3099
  }
3083
3100
  else {
3084
3101
  throw new PipelineExecutionError(spaceTrim((block) => `
3085
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
3102
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
3086
3103
 
3087
3104
  Available \`LlmExecutionTools\`:
3088
3105
  ${block(this.description)}
@@ -3112,7 +3129,7 @@ class MultipleLlmExecutionTools {
3112
3129
  *
3113
3130
  * @public exported from `@promptbook/core`
3114
3131
  */
3115
- function joinLlmExecutionTools(...llmExecutionTools) {
3132
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
3116
3133
  if (llmExecutionTools.length === 0) {
3117
3134
  const warningMessage = spaceTrim(`
3118
3135
  You have not provided any \`LlmExecutionTools\`
@@ -3144,30 +3161,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
3144
3161
  };
3145
3162
  */
3146
3163
  }
3147
- return new MultipleLlmExecutionTools(...llmExecutionTools);
3164
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
3148
3165
  }
3149
3166
  /**
3150
3167
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3151
3168
  */
3152
3169
 
3153
3170
  /**
3154
- * Takes an item or an array of items and returns an array of items
3155
- *
3156
- * 1) Any item except array and undefined returns array with that one item (also null)
3157
- * 2) Undefined returns empty array
3158
- * 3) Array returns itself
3171
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
3159
3172
  *
3160
- * @private internal utility
3173
+ * @public exported from `@promptbook/core`
3161
3174
  */
3162
- function arrayableToArray(input) {
3163
- if (input === undefined) {
3164
- return [];
3165
- }
3166
- if (input instanceof Array) {
3167
- return input;
3168
- }
3169
- return [input];
3175
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
3176
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
3177
+ const llmTools = _llms.length === 1
3178
+ ? _llms[0]
3179
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
3180
+ return llmTools;
3170
3181
  }
3182
+ /**
3183
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3184
+ */
3171
3185
 
3172
3186
  /**
3173
3187
  * Prepares the persona for the pipeline
@@ -3186,8 +3200,7 @@ async function preparePersona(personaDescription, tools, options) {
3186
3200
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
3187
3201
  tools,
3188
3202
  });
3189
- const _llms = arrayableToArray(tools.llm);
3190
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
3203
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
3191
3204
  const availableModels = (await llmTools.listModels())
3192
3205
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
3193
3206
  .map(({ modelName, modelDescription }) => ({
@@ -4350,9 +4363,7 @@ async function preparePipeline(pipeline, tools, options) {
4350
4363
  if (tools === undefined || tools.llm === undefined) {
4351
4364
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
4352
4365
  }
4353
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4354
- const _llms = arrayableToArray(tools.llm);
4355
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4366
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4356
4367
  const llmToolsWithUsage = countUsage(llmTools);
4357
4368
  // <- TODO: [🌯]
4358
4369
  /*
@@ -5512,9 +5523,7 @@ async function executeAttempts(options) {
5512
5523
  $scriptPipelineExecutionErrors: [],
5513
5524
  $failedResults: [], // Track all failed attempts
5514
5525
  };
5515
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5516
- const _llms = arrayableToArray(tools.llm);
5517
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5526
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5518
5527
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
5519
5528
  const isJokerAttempt = attemptIndex < 0;
5520
5529
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -6034,9 +6043,7 @@ async function getKnowledgeForTask(options) {
6034
6043
  return ''; // <- Note: No knowledge present, return empty string
6035
6044
  }
6036
6045
  try {
6037
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6038
- const _llms = arrayableToArray(tools.llm);
6039
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6046
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
6040
6047
  const taskEmbeddingPrompt = {
6041
6048
  title: 'Knowledge Search',
6042
6049
  modelRequirements: {
@@ -6899,7 +6906,7 @@ function $registeredLlmToolsMessage() {
6899
6906
  * @public exported from `@promptbook/core`
6900
6907
  */
6901
6908
  function createLlmToolsFromConfiguration(configuration, options = {}) {
6902
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
6909
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
6903
6910
  const llmTools = configuration.map((llmConfiguration) => {
6904
6911
  const registeredItem = $llmToolsRegister
6905
6912
  .list()
@@ -6931,7 +6938,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
6931
6938
  ...llmConfiguration.options,
6932
6939
  });
6933
6940
  });
6934
- return joinLlmExecutionTools(...llmTools);
6941
+ return joinLlmExecutionTools(title, ...llmTools);
6935
6942
  }
6936
6943
  /**
6937
6944
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -8180,8 +8187,11 @@ function startRemoteServer(options) {
8180
8187
  if (isAnonymous === true) {
8181
8188
  // Note: Anonymous mode
8182
8189
  // TODO: Maybe check that configuration is not empty
8183
- const { llmToolsConfiguration } = identification;
8184
- llm = createLlmToolsFromConfiguration(llmToolsConfiguration, { isVerbose });
8190
+ const { userId, llmToolsConfiguration } = identification;
8191
+ llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
8192
+ title: `LLM Tools for anonymous user "${userId}" on server`,
8193
+ isVerbose,
8194
+ });
8185
8195
  }
8186
8196
  else if (isAnonymous === false && createLlmExecutionTools !== null) {
8187
8197
  // Note: Application mode