@promptbook/remote-server 0.101.0-2 → 0.101.0-20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/README.md +45 -0
  2. package/esm/index.es.js +53 -42
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/components.index.d.ts +20 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +14 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +14 -0
  7. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +41 -3
  8. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  11. package/esm/typings/src/book-2.0/agent-source/parseParameters.d.ts +13 -0
  12. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +8 -2
  13. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +59 -0
  14. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +8 -2
  15. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +45 -0
  16. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +1 -1
  17. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +46 -0
  18. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +47 -0
  19. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +62 -0
  20. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +31 -4
  21. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +20 -2
  22. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +8 -2
  23. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +46 -0
  24. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +8 -2
  25. package/esm/typings/src/book-2.0/commitments/index.d.ts +7 -3
  26. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  27. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +2 -2
  28. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +63 -0
  29. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/index.d.ts +3 -0
  30. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +15 -0
  31. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +4 -0
  32. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +26 -0
  33. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  34. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  35. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  36. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  37. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  38. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  39. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  40. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  41. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  42. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  43. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  44. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  45. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  46. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  47. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  48. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  49. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  50. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  51. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +54 -0
  52. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  53. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  54. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  55. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  56. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  57. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  58. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  59. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +15 -8
  60. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  61. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  62. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  63. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  64. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  65. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  66. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +1 -0
  67. package/esm/typings/src/version.d.ts +1 -1
  68. package/package.json +2 -2
  69. package/umd/index.umd.js +53 -42
  70. package/umd/index.umd.js.map +1 -1
  71. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  72. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  73. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  74. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  75. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  76. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  77. /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/README.md CHANGED
@@ -49,6 +49,51 @@ npm install @promptbook/remote-server
49
49
  ```
50
50
 
51
51
 
52
+ Remote server implementation for Promptbook, enabling distributed execution of promptbook pipelines across network boundaries with REST API and WebSocket support.
53
+
54
+ ## 🎯 Purpose and Motivation
55
+
56
+ This package provides a remote server that allows promptbook pipelines to be executed over the network. It enables distributed architectures where promptbook execution can be centralized on powerful servers while clients can access the functionality remotely, making it ideal for scaling promptbook applications and providing API access to promptbook collections.
57
+
58
+ ## 🔧 High-Level Functionality
59
+
60
+ The package provides remote server capabilities:
61
+ - **HTTP REST API**: RESTful endpoints for pipeline execution and management
62
+ - **WebSocket Support**: Real-time communication for streaming execution results
63
+ - **Authentication**: Support for both anonymous and application-based authentication
64
+ - **Pipeline Management**: Remote access to promptbook collections and pipelines
65
+ - **Execution Orchestration**: Distributed execution of promptbook pipelines
66
+ - **OpenAI Compatibility**: OpenAI-compatible API endpoints for seamless integration
67
+
68
+ ## ✨ Key Features
69
+
70
+ - 🌐 **Remote Execution** - Execute promptbook pipelines over HTTP/WebSocket
71
+ - 🔐 **Authentication Modes** - Support for anonymous and application-based access
72
+ - 📡 **Real-time Communication** - WebSocket support for streaming results
73
+ - 🔌 **OpenAI Compatible** - Use promptbooks as OpenAI-compatible models
74
+ - 🚀 **Scalable Architecture** - Distribute promptbook execution across servers
75
+ - 📊 **Pipeline Management** - Remote access to collections and individual pipelines
76
+ - 🛡️ **Security** - Configurable authentication and access control
77
+ - ⚡ **High Performance** - Optimized for concurrent pipeline execution
78
+
79
+ ## 📦 Exported Entities
80
+
81
+ ### Version Information
82
+ - `BOOK_LANGUAGE_VERSION` - Current book language version
83
+ - `PROMPTBOOK_ENGINE_VERSION` - Current engine version
84
+
85
+ ### Server Management
86
+ - `startRemoteServer` - Start the remote promptbook server
87
+
88
+ ### Configuration Types
89
+ - `RemoteServerOptions` - Configuration options for remote server (type)
90
+
91
+ ### Authentication Types
92
+ - `Identification` - Base identification interface (type)
93
+ - `ApplicationModeIdentification` - Application mode identification (type)
94
+ - `AnonymousModeIdentification` - Anonymous mode identification (type)
95
+
96
+ > 💡 This package provides remote server functionality for promptbook applications. For the core functionality, see [@promptbook/core](#-packages) or install all packages with `npm i ptbk`
52
97
 
53
98
 
54
99
  ---
package/esm/index.es.js CHANGED
@@ -31,7 +31,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
31
31
  * @generated
32
32
  * @see https://github.com/webgptorg/promptbook
33
33
  */
34
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-2';
34
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-20';
35
35
  /**
36
36
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
37
37
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2877,6 +2877,25 @@ function countUsage(llmTools) {
2877
2877
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
2878
2878
  */
2879
2879
 
2880
+ /**
2881
+ * Takes an item or an array of items and returns an array of items
2882
+ *
2883
+ * 1) Any item except array and undefined returns array with that one item (also null)
2884
+ * 2) Undefined returns empty array
2885
+ * 3) Array returns itself
2886
+ *
2887
+ * @private internal utility
2888
+ */
2889
+ function arrayableToArray(input) {
2890
+ if (input === undefined) {
2891
+ return [];
2892
+ }
2893
+ if (input instanceof Array) {
2894
+ return input;
2895
+ }
2896
+ return [input];
2897
+ }
2898
+
2880
2899
  /**
2881
2900
  * Predefined profiles for LLM providers to maintain consistency across the application
2882
2901
  * These profiles represent each provider as a virtual persona in chat interfaces
@@ -2957,12 +2976,10 @@ class MultipleLlmExecutionTools {
2957
2976
  /**
2958
2977
  * Gets array of execution tools in order of priority
2959
2978
  */
2960
- constructor(...llmExecutionTools) {
2979
+ constructor(title, ...llmExecutionTools) {
2980
+ this.title = title;
2961
2981
  this.llmExecutionTools = llmExecutionTools;
2962
2982
  }
2963
- get title() {
2964
- return 'Multiple LLM Providers';
2965
- }
2966
2983
  get description() {
2967
2984
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
2968
2985
  .map(({ title, description }, index) => {
@@ -3048,7 +3065,7 @@ class MultipleLlmExecutionTools {
3048
3065
  return await llmExecutionTools.callEmbeddingModel(prompt);
3049
3066
  // <- case [🤖]:
3050
3067
  default:
3051
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
3068
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
3052
3069
  }
3053
3070
  }
3054
3071
  catch (error) {
@@ -3069,7 +3086,7 @@ class MultipleLlmExecutionTools {
3069
3086
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
3070
3087
  // 3) ...
3071
3088
  spaceTrim((block) => `
3072
- All execution tools failed:
3089
+ All execution tools of ${this.title} failed:
3073
3090
 
3074
3091
  ${block(errors
3075
3092
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3078,11 +3095,11 @@ class MultipleLlmExecutionTools {
3078
3095
  `));
3079
3096
  }
3080
3097
  else if (this.llmExecutionTools.length === 0) {
3081
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
3098
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
3082
3099
  }
3083
3100
  else {
3084
3101
  throw new PipelineExecutionError(spaceTrim((block) => `
3085
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
3102
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
3086
3103
 
3087
3104
  Available \`LlmExecutionTools\`:
3088
3105
  ${block(this.description)}
@@ -3112,7 +3129,7 @@ class MultipleLlmExecutionTools {
3112
3129
  *
3113
3130
  * @public exported from `@promptbook/core`
3114
3131
  */
3115
- function joinLlmExecutionTools(...llmExecutionTools) {
3132
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
3116
3133
  if (llmExecutionTools.length === 0) {
3117
3134
  const warningMessage = spaceTrim(`
3118
3135
  You have not provided any \`LlmExecutionTools\`
@@ -3144,30 +3161,27 @@ function joinLlmExecutionTools(...llmExecutionTools) {
3144
3161
  };
3145
3162
  */
3146
3163
  }
3147
- return new MultipleLlmExecutionTools(...llmExecutionTools);
3164
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
3148
3165
  }
3149
3166
  /**
3150
3167
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3151
3168
  */
3152
3169
 
3153
3170
  /**
3154
- * Takes an item or an array of items and returns an array of items
3155
- *
3156
- * 1) Any item except array and undefined returns array with that one item (also null)
3157
- * 2) Undefined returns empty array
3158
- * 3) Array returns itself
3171
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
3159
3172
  *
3160
- * @private internal utility
3173
+ * @public exported from `@promptbook/core`
3161
3174
  */
3162
- function arrayableToArray(input) {
3163
- if (input === undefined) {
3164
- return [];
3165
- }
3166
- if (input instanceof Array) {
3167
- return input;
3168
- }
3169
- return [input];
3175
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
3176
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
3177
+ const llmTools = _llms.length === 1
3178
+ ? _llms[0]
3179
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
3180
+ return llmTools;
3170
3181
  }
3182
+ /**
3183
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3184
+ */
3171
3185
 
3172
3186
  /**
3173
3187
  * Prepares the persona for the pipeline
@@ -3186,8 +3200,7 @@ async function preparePersona(personaDescription, tools, options) {
3186
3200
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
3187
3201
  tools,
3188
3202
  });
3189
- const _llms = arrayableToArray(tools.llm);
3190
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
3203
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
3191
3204
  const availableModels = (await llmTools.listModels())
3192
3205
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
3193
3206
  .map(({ modelName, modelDescription }) => ({
@@ -3231,6 +3244,7 @@ async function preparePersona(personaDescription, tools, options) {
3231
3244
  };
3232
3245
  }
3233
3246
  /**
3247
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
3234
3248
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
3235
3249
  * TODO: [🏢] Check validity of `modelName` in pipeline
3236
3250
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -4349,9 +4363,7 @@ async function preparePipeline(pipeline, tools, options) {
4349
4363
  if (tools === undefined || tools.llm === undefined) {
4350
4364
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
4351
4365
  }
4352
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4353
- const _llms = arrayableToArray(tools.llm);
4354
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4366
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4355
4367
  const llmToolsWithUsage = countUsage(llmTools);
4356
4368
  // <- TODO: [🌯]
4357
4369
  /*
@@ -5511,9 +5523,7 @@ async function executeAttempts(options) {
5511
5523
  $scriptPipelineExecutionErrors: [],
5512
5524
  $failedResults: [], // Track all failed attempts
5513
5525
  };
5514
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
5515
- const _llms = arrayableToArray(tools.llm);
5516
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5526
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5517
5527
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
5518
5528
  const isJokerAttempt = attemptIndex < 0;
5519
5529
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -6033,9 +6043,7 @@ async function getKnowledgeForTask(options) {
6033
6043
  return ''; // <- Note: No knowledge present, return empty string
6034
6044
  }
6035
6045
  try {
6036
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6037
- const _llms = arrayableToArray(tools.llm);
6038
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6046
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
6039
6047
  const taskEmbeddingPrompt = {
6040
6048
  title: 'Knowledge Search',
6041
6049
  modelRequirements: {
@@ -6636,13 +6644,13 @@ function createPipelineExecutor(options) {
6636
6644
  // Calculate and update tldr based on pipeline progress
6637
6645
  const cv = newOngoingResult;
6638
6646
  // Calculate progress based on parameters resolved vs total parameters
6639
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
6647
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
6640
6648
  let resolvedParameters = 0;
6641
6649
  let currentTaskTitle = '';
6642
6650
  // Get the resolved parameters from output parameters
6643
6651
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
6644
6652
  // Count how many output parameters have non-empty values
6645
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
6653
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
6646
6654
  }
6647
6655
  // Try to determine current task from execution report
6648
6656
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6898,7 +6906,7 @@ function $registeredLlmToolsMessage() {
6898
6906
  * @public exported from `@promptbook/core`
6899
6907
  */
6900
6908
  function createLlmToolsFromConfiguration(configuration, options = {}) {
6901
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
6909
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
6902
6910
  const llmTools = configuration.map((llmConfiguration) => {
6903
6911
  const registeredItem = $llmToolsRegister
6904
6912
  .list()
@@ -6930,7 +6938,7 @@ function createLlmToolsFromConfiguration(configuration, options = {}) {
6930
6938
  ...llmConfiguration.options,
6931
6939
  });
6932
6940
  });
6933
- return joinLlmExecutionTools(...llmTools);
6941
+ return joinLlmExecutionTools(title, ...llmTools);
6934
6942
  }
6935
6943
  /**
6936
6944
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -8179,8 +8187,11 @@ function startRemoteServer(options) {
8179
8187
  if (isAnonymous === true) {
8180
8188
  // Note: Anonymous mode
8181
8189
  // TODO: Maybe check that configuration is not empty
8182
- const { llmToolsConfiguration } = identification;
8183
- llm = createLlmToolsFromConfiguration(llmToolsConfiguration, { isVerbose });
8190
+ const { userId, llmToolsConfiguration } = identification;
8191
+ llm = createLlmToolsFromConfiguration(llmToolsConfiguration, {
8192
+ title: `LLM Tools for anonymous user "${userId}" on server`,
8193
+ isVerbose,
8194
+ });
8184
8195
  }
8185
8196
  else if (isAnonymous === false && createLlmExecutionTools !== null) {
8186
8197
  // Note: Application mode