@promptbook/node 0.101.0-2 → 0.101.0-20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/esm/index.es.js +55 -46
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +20 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +14 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +14 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +41 -3
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  10. package/esm/typings/src/book-2.0/agent-source/parseParameters.d.ts +13 -0
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +8 -2
  12. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +59 -0
  13. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +8 -2
  14. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +45 -0
  15. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +1 -1
  16. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +46 -0
  17. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +47 -0
  18. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +62 -0
  19. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +31 -4
  20. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +20 -2
  21. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +8 -2
  22. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +46 -0
  23. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +8 -2
  24. package/esm/typings/src/book-2.0/commitments/index.d.ts +7 -3
  25. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  26. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +2 -2
  27. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +63 -0
  28. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/index.d.ts +3 -0
  29. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +15 -0
  30. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +4 -0
  31. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +26 -0
  32. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  33. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  34. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  35. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  36. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  37. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  38. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  39. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  40. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  41. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  42. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  43. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  44. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  45. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  46. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  47. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  48. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  49. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  50. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +54 -0
  51. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  52. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  53. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  54. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  55. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  56. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  57. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  58. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +15 -8
  59. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  60. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  61. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  62. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  63. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  64. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  65. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +1 -0
  66. package/esm/typings/src/version.d.ts +1 -1
  67. package/package.json +2 -2
  68. package/umd/index.umd.js +55 -46
  69. package/umd/index.umd.js.map +1 -1
  70. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  71. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  72. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  73. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  74. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  75. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  76. /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
@@ -14,4 +14,4 @@ export {};
14
14
  /**
15
15
  * TODO: [🧠] What is the better solution - `- xxx`, - `- xxx` or preserve (see also next TODO)
16
16
  * TODO: When existing commands 1) as 2) number 3) list, add 4) new command as next number
17
- */
17
+ */
@@ -9,5 +9,6 @@ import { string_markdown } from '../../types/typeAliases';
9
9
  */
10
10
  export declare function humanizeAiText(aiText: string_markdown): string_markdown;
11
11
  /**
12
+ * TODO: [🧠] Maybe this should be exported from `@promptbook/utils` not `@promptbook/markdown-utils`
12
13
  * TODO: [🅾️] !!! Use this across the project where AI text is involved
13
14
  */
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.101.0-1`).
18
+ * It follows semantic versioning (e.g., `0.101.0-19`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/node",
3
- "version": "0.101.0-2",
3
+ "version": "0.101.0-20",
4
4
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
5
5
  "private": false,
6
6
  "sideEffects": false,
@@ -93,7 +93,7 @@
93
93
  "module": "./esm/index.es.js",
94
94
  "typings": "./esm/typings/src/_packages/node.index.d.ts",
95
95
  "peerDependencies": {
96
- "@promptbook/core": "0.101.0-2"
96
+ "@promptbook/core": "0.101.0-20"
97
97
  },
98
98
  "dependencies": {
99
99
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -45,7 +45,7 @@
45
45
  * @generated
46
46
  * @see https://github.com/webgptorg/promptbook
47
47
  */
48
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-2';
48
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-20';
49
49
  /**
50
50
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
51
51
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3027,6 +3027,25 @@
3027
3027
  return mappedParameters;
3028
3028
  }
3029
3029
 
3030
+ /**
3031
+ * Takes an item or an array of items and returns an array of items
3032
+ *
3033
+ * 1) Any item except array and undefined returns array with that one item (also null)
3034
+ * 2) Undefined returns empty array
3035
+ * 3) Array returns itself
3036
+ *
3037
+ * @private internal utility
3038
+ */
3039
+ function arrayableToArray(input) {
3040
+ if (input === undefined) {
3041
+ return [];
3042
+ }
3043
+ if (input instanceof Array) {
3044
+ return input;
3045
+ }
3046
+ return [input];
3047
+ }
3048
+
3030
3049
  /**
3031
3050
  * Predefined profiles for LLM providers to maintain consistency across the application
3032
3051
  * These profiles represent each provider as a virtual persona in chat interfaces
@@ -3107,12 +3126,10 @@
3107
3126
  /**
3108
3127
  * Gets array of execution tools in order of priority
3109
3128
  */
3110
- constructor(...llmExecutionTools) {
3129
+ constructor(title, ...llmExecutionTools) {
3130
+ this.title = title;
3111
3131
  this.llmExecutionTools = llmExecutionTools;
3112
3132
  }
3113
- get title() {
3114
- return 'Multiple LLM Providers';
3115
- }
3116
3133
  get description() {
3117
3134
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
3118
3135
  .map(({ title, description }, index) => {
@@ -3198,7 +3215,7 @@
3198
3215
  return await llmExecutionTools.callEmbeddingModel(prompt);
3199
3216
  // <- case [🤖]:
3200
3217
  default:
3201
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
3218
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
3202
3219
  }
3203
3220
  }
3204
3221
  catch (error) {
@@ -3219,7 +3236,7 @@
3219
3236
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
3220
3237
  // 3) ...
3221
3238
  spaceTrim__default["default"]((block) => `
3222
- All execution tools failed:
3239
+ All execution tools of ${this.title} failed:
3223
3240
 
3224
3241
  ${block(errors
3225
3242
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3228,11 +3245,11 @@
3228
3245
  `));
3229
3246
  }
3230
3247
  else if (this.llmExecutionTools.length === 0) {
3231
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
3248
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
3232
3249
  }
3233
3250
  else {
3234
3251
  throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
3235
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
3252
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
3236
3253
 
3237
3254
  Available \`LlmExecutionTools\`:
3238
3255
  ${block(this.description)}
@@ -3262,7 +3279,7 @@
3262
3279
  *
3263
3280
  * @public exported from `@promptbook/core`
3264
3281
  */
3265
- function joinLlmExecutionTools(...llmExecutionTools) {
3282
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
3266
3283
  if (llmExecutionTools.length === 0) {
3267
3284
  const warningMessage = spaceTrim__default["default"](`
3268
3285
  You have not provided any \`LlmExecutionTools\`
@@ -3294,30 +3311,27 @@
3294
3311
  };
3295
3312
  */
3296
3313
  }
3297
- return new MultipleLlmExecutionTools(...llmExecutionTools);
3314
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
3298
3315
  }
3299
3316
  /**
3300
3317
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3301
3318
  */
3302
3319
 
3303
3320
  /**
3304
- * Takes an item or an array of items and returns an array of items
3305
- *
3306
- * 1) Any item except array and undefined returns array with that one item (also null)
3307
- * 2) Undefined returns empty array
3308
- * 3) Array returns itself
3321
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
3309
3322
  *
3310
- * @private internal utility
3323
+ * @public exported from `@promptbook/core`
3311
3324
  */
3312
- function arrayableToArray(input) {
3313
- if (input === undefined) {
3314
- return [];
3315
- }
3316
- if (input instanceof Array) {
3317
- return input;
3318
- }
3319
- return [input];
3325
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
3326
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
3327
+ const llmTools = _llms.length === 1
3328
+ ? _llms[0]
3329
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
3330
+ return llmTools;
3320
3331
  }
3332
+ /**
3333
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3334
+ */
3321
3335
 
3322
3336
  /**
3323
3337
  * Just says that the variable is not used but should be kept
@@ -4020,9 +4034,7 @@
4020
4034
  $scriptPipelineExecutionErrors: [],
4021
4035
  $failedResults: [], // Track all failed attempts
4022
4036
  };
4023
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4024
- const _llms = arrayableToArray(tools.llm);
4025
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4037
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4026
4038
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
4027
4039
  const isJokerAttempt = attemptIndex < 0;
4028
4040
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -4542,9 +4554,7 @@
4542
4554
  return ''; // <- Note: Np knowledge present, return empty string
4543
4555
  }
4544
4556
  try {
4545
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
4546
- const _llms = arrayableToArray(tools.llm);
4547
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
4557
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
4548
4558
  const taskEmbeddingPrompt = {
4549
4559
  title: 'Knowledge Search',
4550
4560
  modelRequirements: {
@@ -5145,13 +5155,13 @@
5145
5155
  // Calculate and update tldr based on pipeline progress
5146
5156
  const cv = newOngoingResult;
5147
5157
  // Calculate progress based on parameters resolved vs total parameters
5148
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
5158
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
5149
5159
  let resolvedParameters = 0;
5150
5160
  let currentTaskTitle = '';
5151
5161
  // Get the resolved parameters from output parameters
5152
5162
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
5153
5163
  // Count how many output parameters have non-empty values
5154
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
5164
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
5155
5165
  }
5156
5166
  // Try to determine current task from execution report
5157
5167
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -5324,8 +5334,7 @@
5324
5334
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
5325
5335
  tools,
5326
5336
  });
5327
- const _llms = arrayableToArray(tools.llm);
5328
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
5337
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
5329
5338
  const availableModels = (await llmTools.listModels())
5330
5339
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
5331
5340
  .map(({ modelName, modelDescription }) => ({
@@ -5369,6 +5378,7 @@
5369
5378
  };
5370
5379
  }
5371
5380
  /**
5381
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
5372
5382
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
5373
5383
  * TODO: [🏢] Check validity of `modelName` in pipeline
5374
5384
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -6224,9 +6234,7 @@
6224
6234
  if (tools === undefined || tools.llm === undefined) {
6225
6235
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
6226
6236
  }
6227
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
6228
- const _llms = arrayableToArray(tools.llm);
6229
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
6237
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
6230
6238
  const llmToolsWithUsage = countUsage(llmTools);
6231
6239
  // <- TODO: [🌯]
6232
6240
  /*
@@ -10581,7 +10589,7 @@
10581
10589
  * @public exported from `@promptbook/core`
10582
10590
  */
10583
10591
  function createLlmToolsFromConfiguration(configuration, options = {}) {
10584
- const { isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
10592
+ const { title = 'LLM Tools from Configuration', isVerbose = DEFAULT_IS_VERBOSE, userId } = options;
10585
10593
  const llmTools = configuration.map((llmConfiguration) => {
10586
10594
  const registeredItem = $llmToolsRegister
10587
10595
  .list()
@@ -10613,7 +10621,7 @@
10613
10621
  ...llmConfiguration.options,
10614
10622
  });
10615
10623
  });
10616
- return joinLlmExecutionTools(...llmTools);
10624
+ return joinLlmExecutionTools(title, ...llmTools);
10617
10625
  }
10618
10626
  /**
10619
10627
  * TODO: [🎌] Together with `createLlmToolsFromConfiguration` + 'EXECUTION_TOOLS_CLASSES' gets to `@promptbook/core` ALL model providers, make this more efficient
@@ -11213,7 +11221,7 @@
11213
11221
  throw new EnvironmentMismatchError('Function `$getExecutionToolsForNode` works only in Node.js environment');
11214
11222
  }
11215
11223
  const fs = $provideFilesystemForNode();
11216
- const llm = await $provideLlmToolsFromEnv(options);
11224
+ const llm = await $provideLlmToolsFromEnv({ title: 'LLM Tools for Node.js', ...options });
11217
11225
  const executables = await $provideExecutablesForNode();
11218
11226
  const tools = {
11219
11227
  llm,
@@ -11646,11 +11654,12 @@
11646
11654
  catch (error) {
11647
11655
  // Note: If we can't write to cache, silently ignore the error
11648
11656
  // This handles read-only filesystems, permission issues, and missing parent directories
11649
- if (error instanceof Error && (error.message.includes('EROFS') ||
11650
- error.message.includes('read-only') ||
11651
- error.message.includes('EACCES') ||
11652
- error.message.includes('EPERM') ||
11653
- error.message.includes('ENOENT'))) {
11657
+ if (error instanceof Error &&
11658
+ (error.message.includes('EROFS') ||
11659
+ error.message.includes('read-only') ||
11660
+ error.message.includes('EACCES') ||
11661
+ error.message.includes('EPERM') ||
11662
+ error.message.includes('ENOENT'))) {
11654
11663
  // Silently ignore filesystem errors - caching is optional
11655
11664
  return;
11656
11665
  }