@promptbook/documents 0.101.0-2 → 0.101.0-20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/esm/index.es.js +53 -46
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/components.index.d.ts +20 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +14 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +14 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +41 -3
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +3 -0
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +4 -22
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.d.ts +1 -26
  10. package/esm/typings/src/book-2.0/agent-source/parseParameters.d.ts +13 -0
  11. package/esm/typings/src/book-2.0/commitments/ACTION/ACTION.d.ts +8 -2
  12. package/esm/typings/src/book-2.0/commitments/DELETE/DELETE.d.ts +59 -0
  13. package/esm/typings/src/book-2.0/commitments/FORMAT/FORMAT.d.ts +8 -2
  14. package/esm/typings/src/book-2.0/commitments/GOAL/GOAL.d.ts +45 -0
  15. package/esm/typings/src/book-2.0/commitments/KNOWLEDGE/KNOWLEDGE.d.ts +1 -1
  16. package/esm/typings/src/book-2.0/commitments/MEMORY/MEMORY.d.ts +46 -0
  17. package/esm/typings/src/book-2.0/commitments/MESSAGE/MESSAGE.d.ts +47 -0
  18. package/esm/typings/src/book-2.0/commitments/META/META.d.ts +62 -0
  19. package/esm/typings/src/book-2.0/commitments/MODEL/MODEL.d.ts +31 -4
  20. package/esm/typings/src/book-2.0/commitments/NOTE/NOTE.d.ts +20 -2
  21. package/esm/typings/src/book-2.0/commitments/PERSONA/PERSONA.d.ts +8 -2
  22. package/esm/typings/src/book-2.0/commitments/SCENARIO/SCENARIO.d.ts +46 -0
  23. package/esm/typings/src/book-2.0/commitments/STYLE/STYLE.d.ts +8 -2
  24. package/esm/typings/src/book-2.0/commitments/index.d.ts +7 -3
  25. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -0
  26. package/esm/typings/src/book-components/AvatarProfile/AvatarChip/AvatarChip.d.ts +2 -2
  27. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/MockedChat.d.ts +63 -0
  28. package/esm/typings/src/book-components/AvatarProfile/AvatarProfile/index.d.ts +3 -0
  29. package/esm/typings/src/book-components/BookEditor/BookEditor.d.ts +15 -0
  30. package/esm/typings/src/book-components/BookEditor/BookEditorInner.d.ts +4 -0
  31. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +26 -0
  32. package/esm/typings/src/book-components/Chat/LlmChat/LlmChatProps.d.ts +13 -0
  33. package/esm/typings/src/book-components/Chat/hooks/index.d.ts +2 -0
  34. package/esm/typings/src/book-components/Chat/hooks/useChatAutoScroll.d.ts +41 -0
  35. package/esm/typings/src/book-components/Chat/hooks/useSendMessageToLlmChat.d.ts +44 -0
  36. package/esm/typings/src/book-components/Chat/utils/parseMessageButtons.d.ts +22 -0
  37. package/esm/typings/src/book-components/icons/PauseIcon.d.ts +8 -0
  38. package/esm/typings/src/book-components/icons/PlayIcon.d.ts +8 -0
  39. package/esm/typings/src/execution/createPipelineExecutor/40-executeAttempts.d.ts +1 -2
  40. package/esm/typings/src/execution/createPipelineExecutor/getKnowledgeForTask.d.ts +1 -3
  41. package/esm/typings/src/formats/csv/CsvFormatError.d.ts +1 -1
  42. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsConfigurationFromEnv.d.ts +1 -1
  43. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
  44. package/esm/typings/src/llm-providers/_common/register/$provideLlmToolsForWizardOrCli.d.ts +1 -2
  45. package/esm/typings/src/llm-providers/_common/register/createLlmToolsFromConfiguration.d.ts +8 -2
  46. package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
  47. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +5 -13
  48. package/esm/typings/src/llm-providers/_multiple/getSingleLlmExecutionTools.d.ts +11 -0
  49. package/esm/typings/src/llm-providers/_multiple/joinLlmExecutionTools.d.ts +2 -1
  50. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +54 -0
  51. package/esm/typings/src/llm-providers/agent/createAgentLlmExecutionTools.d.ts +29 -0
  52. package/esm/typings/src/llm-providers/agent/playground/playground.d.ts +8 -0
  53. package/esm/typings/src/llm-providers/agent/register-configuration.d.ts +11 -0
  54. package/esm/typings/src/llm-providers/agent/register-constructor.d.ts +13 -0
  55. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +1 -0
  56. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -5
  57. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +2 -6
  58. package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +15 -8
  59. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  60. package/esm/typings/src/remote-server/openapi-types.d.ts +31 -31
  61. package/esm/typings/src/scrapers/markdown/MarkdownScraper.d.ts +1 -2
  62. package/esm/typings/src/types/ModelRequirements.d.ts +2 -4
  63. package/esm/typings/src/utils/color/utils/colorSaturation.d.ts +1 -1
  64. package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts +1 -1
  65. package/esm/typings/src/utils/markdown/humanizeAiText.d.ts +1 -0
  66. package/esm/typings/src/version.d.ts +1 -1
  67. package/package.json +2 -2
  68. package/umd/index.umd.js +53 -46
  69. package/umd/index.umd.js.map +1 -1
  70. package/esm/typings/src/book-2.0/utils/extractAgentMetadata.d.ts +0 -17
  71. package/esm/typings/src/book-2.0/utils/extractProfileImageFromSystemMessage.d.ts +0 -12
  72. package/esm/typings/src/llm-providers/mocked/test/joker.test.d.ts +0 -4
  73. package/esm/typings/src/llm-providers/mocked/test/mocked-chat.test.d.ts +0 -5
  74. package/esm/typings/src/llm-providers/mocked/test/mocked-completion.test.d.ts +0 -4
  75. package/esm/typings/src/scripting/_test/postprocessing.test.d.ts +0 -1
  76. /package/esm/typings/src/{cli/test/ptbk.test.d.ts → llm-providers/_common/utils/removeUnsupportedModelRequirements.test.d.ts} +0 -0
package/esm/typings/src/utils/editable/edit-pipeline-string/addPipelineCommand.d.ts CHANGED
@@ -14,4 +14,4 @@ export {};
  /**
  * TODO: [🧠] What is the better solution - `- xxx`, - `- xxx` or preserve (see also next TODO)
  * TODO: When existing commands 1) as 2) number 3) list, add 4) new command as next number
- */
+ */
package/esm/typings/src/utils/markdown/humanizeAiText.d.ts CHANGED
@@ -9,5 +9,6 @@ import { string_markdown } from '../../types/typeAliases';
  */
  export declare function humanizeAiText(aiText: string_markdown): string_markdown;
  /**
+ * TODO: [🧠] Maybe this should be exported from `@promptbook/utils` not `@promptbook/markdown-utils`
  * TODO: [🅾️] !!! Use this across the project where AI text is involved
  */
package/esm/typings/src/version.d.ts CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.101.0-1`).
+ * It follows semantic versioning (e.g., `0.101.0-19`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/documents",
- "version": "0.101.0-2",
+ "version": "0.101.0-20",
  "description": "Promptbook: Run AI apps in plain human language across multiple models and platforms",
  "private": false,
  "sideEffects": false,
@@ -95,7 +95,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/documents.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.101.0-2"
+ "@promptbook/core": "0.101.0-20"
  },
  "dependencies": {
  "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-2';
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-20';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1055,11 +1055,12 @@
  catch (error) {
  // Note: If we can't create cache directory, continue without it
  // This handles read-only filesystems, permission issues, and missing parent directories
- if (error instanceof Error && (error.message.includes('EROFS') ||
- error.message.includes('read-only') ||
- error.message.includes('EACCES') ||
- error.message.includes('EPERM') ||
- error.message.includes('ENOENT'))) ;
+ if (error instanceof Error &&
+ (error.message.includes('EROFS') ||
+ error.message.includes('read-only') ||
+ error.message.includes('EACCES') ||
+ error.message.includes('EPERM') ||
+ error.message.includes('ENOENT'))) ;
  else {
  // Re-throw other unexpected errors
  throw error;
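Note on the hunk above: only the line-wrapping changed; the behavior stays the same — filesystem errors that merely mean a cache directory cannot be created are ignored, everything else is re-thrown. A minimal sketch of that pattern (the function name and path handling are illustrative, not the package's internal API):

```ts
import { mkdirSync } from 'node:fs';

// Illustrative sketch only: swallow "cannot create cache directory" errors, re-throw the rest
function tryCreateCacheDirectory(path: string): void {
    try {
        mkdirSync(path, { recursive: true });
    } catch (error) {
        const isIgnorableFsError =
            error instanceof Error &&
            (error.message.includes('EROFS') || // read-only filesystem
                error.message.includes('read-only') ||
                error.message.includes('EACCES') || // permission denied
                error.message.includes('EPERM') ||
                error.message.includes('ENOENT')); // missing parent directory
        if (!isIgnorableFsError) {
            throw error; // unexpected errors still surface
        }
        // otherwise continue without a cache directory
    }
}
```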
@@ -3119,6 +3120,25 @@
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
+ /**
+ * Takes an item or an array of items and returns an array of items
+ *
+ * 1) Any item except array and undefined returns array with that one item (also null)
+ * 2) Undefined returns empty array
+ * 3) Array returns itself
+ *
+ * @private internal utility
+ */
+ function arrayableToArray(input) {
+ if (input === undefined) {
+ return [];
+ }
+ if (input instanceof Array) {
+ return input;
+ }
+ return [input];
+ }
+
  /**
  * Predefined profiles for LLM providers to maintain consistency across the application
  * These profiles represent each provider as a virtual persona in chat interfaces
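The `arrayableToArray` helper added here (it is moved up from later in the bundle; see the matching removal below) normalizes a value-or-array into an array. It is marked `@private`, so the following is only an illustrative TypeScript sketch of its behavior, not an importable API:

```ts
// Sketch of the @private helper's behavior
function arrayableToArray<TItem>(input?: TItem | ReadonlyArray<TItem>): ReadonlyArray<TItem> {
    if (input === undefined) {
        return []; // undefined -> empty array
    }
    if (input instanceof Array) {
        return input; // an array is returned as-is
    }
    return [input]; // any single item (including null) -> one-element array
}

arrayableToArray(undefined); // []
arrayableToArray('gpt-4o'); // ['gpt-4o']
arrayableToArray(['gpt-4o', 'claude-3']); // ['gpt-4o', 'claude-3']
```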
@@ -3199,12 +3219,10 @@
  /**
  * Gets array of execution tools in order of priority
  */
- constructor(...llmExecutionTools) {
+ constructor(title, ...llmExecutionTools) {
+ this.title = title;
  this.llmExecutionTools = llmExecutionTools;
  }
- get title() {
- return 'Multiple LLM Providers';
- }
  get description() {
  const innerModelsTitlesAndDescriptions = this.llmExecutionTools
  .map(({ title, description }, index) => {
@@ -3290,7 +3308,7 @@
  return await llmExecutionTools.callEmbeddingModel(prompt);
  // <- case [🤖]:
  default:
- throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
+ throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
  }
  }
  catch (error) {
@@ -3311,7 +3329,7 @@
  // 2) AnthropicClaude throw PipelineExecutionError: Parameter `{knowledge}` is not defined
  // 3) ...
  spaceTrim__default["default"]((block) => `
- All execution tools failed:
+ All execution tools of ${this.title} failed:
 
  ${block(errors
  .map(({ error, llmExecutionTools }, i) => `${i + 1}) **${llmExecutionTools.title}** thrown **${error.name || 'Error'}:** ${error.message}`)
@@ -3320,11 +3338,11 @@
  `));
  }
  else if (this.llmExecutionTools.length === 0) {
- throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\``);
+ throw new PipelineExecutionError(`You have not provided any \`LlmExecutionTools\` into ${this.title}`);
  }
  else {
  throw new PipelineExecutionError(spaceTrim__default["default"]((block) => `
- You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}"
+ You have not provided any \`LlmExecutionTools\` that support model variant "${prompt.modelRequirements.modelVariant}" into ${this.title}
 
  Available \`LlmExecutionTools\`:
  ${block(this.description)}
@@ -3354,7 +3372,7 @@
  *
  * @public exported from `@promptbook/core`
  */
- function joinLlmExecutionTools(...llmExecutionTools) {
+ function joinLlmExecutionTools(title, ...llmExecutionTools) {
  if (llmExecutionTools.length === 0) {
  const warningMessage = spaceTrim__default["default"](`
  You have not provided any `LlmExecutionTools`
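`joinLlmExecutionTools` now takes the aggregate's `title` as its first parameter (mirroring the `MultipleLlmExecutionTools` constructor change above); the next hunk shows it falling back to a default title when none is given. A minimal usage sketch, assuming two already-constructed provider tools (`openAiTools` and `anthropicTools` are placeholders):

```ts
import { joinLlmExecutionTools } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types';

// Placeholder provider tools for illustration only
declare const openAiTools: LlmExecutionTools;
declare const anthropicTools: LlmExecutionTools;

// Previously: joinLlmExecutionTools(openAiTools, anthropicTools)
// The first argument now names the aggregate; that name is used in error messages
// such as "All execution tools of <title> failed:".
const llmTools = joinLlmExecutionTools('My app LLM providers', openAiTools, anthropicTools);
```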
@@ -3386,30 +3404,27 @@
  };
  */
  }
- return new MultipleLlmExecutionTools(...llmExecutionTools);
+ return new MultipleLlmExecutionTools(title || 'Multiple LLM Providers joined by `joinLlmExecutionTools`', ...llmExecutionTools);
  }
  /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
  /**
- * Takes an item or an array of items and returns an array of items
- *
- * 1) Any item except array and undefined returns array with that one item (also null)
- * 2) Undefined returns empty array
- * 3) Array returns itself
+ * Just returns the given `LlmExecutionTools` or joins multiple into one
  *
- * @private internal utility
+ * @public exported from `@promptbook/core`
  */
- function arrayableToArray(input) {
- if (input === undefined) {
- return [];
- }
- if (input instanceof Array) {
- return input;
- }
- return [input];
+ function getSingleLlmExecutionTools(oneOrMoreLlmExecutionTools) {
+ const _llms = arrayableToArray(oneOrMoreLlmExecutionTools);
+ const llmTools = _llms.length === 1
+ ? _llms[0]
+ : joinLlmExecutionTools('Multiple LLM Providers joined by `getSingleLlmExecutionTools`', ..._llms);
+ return llmTools;
  }
+ /**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
 
  /**
  * Prepares the persona for the pipeline
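The repeated `arrayableToArray` + `joinLlmExecutionTools` pattern at call sites (see the hunks below) is replaced by the new `getSingleLlmExecutionTools` helper, which the bundle's JSDoc marks as `@public exported from @promptbook/core`. A hedged sketch of what the refactored call sites now amount to:

```ts
import { getSingleLlmExecutionTools } from '@promptbook/core';
import type { LlmExecutionTools } from '@promptbook/types';

// Inputs like `tools.llm` may be a single LlmExecutionTools or an array of them
declare const llm: LlmExecutionTools | ReadonlyArray<LlmExecutionTools>;

// A single item is returned as-is; multiple items are joined into one aggregate
// titled "Multiple LLM Providers joined by `getSingleLlmExecutionTools`"
const llmTools = getSingleLlmExecutionTools(llm);
const chatModels = (await llmTools.listModels()).filter(({ modelVariant }) => modelVariant === 'CHAT');
```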
@@ -3428,8 +3443,7 @@
  pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
  tools,
  });
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const availableModels = (await llmTools.listModels())
  .filter(({ modelVariant }) => modelVariant === 'CHAT')
  .map(({ modelName, modelDescription }) => ({
@@ -3473,6 +3487,7 @@
  };
  }
  /**
+ * TODO: [😩] DRY `preparePersona` and `selectBestModelFromAvailable`
  * TODO: [🔃][main] If the persona was prepared with different version or different set of models, prepare it once again
  * TODO: [🏢] Check validity of `modelName` in pipeline
  * TODO: [🏢] Check validity of `systemMessage` in pipeline
@@ -4190,9 +4205,7 @@
  if (tools === undefined || tools.llm === undefined) {
  throw new MissingToolsError('LLM tools are required for preparing the pipeline');
  }
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const llmToolsWithUsage = countUsage(llmTools);
  // <- TODO: [🌯]
  /*
@@ -5335,9 +5348,7 @@
  $scriptPipelineExecutionErrors: [],
  $failedResults: [], // Track all failed attempts
  };
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  attempts: for (let attemptIndex = -jokerParameterNames.length; attemptIndex < maxAttempts; attemptIndex++) {
  const isJokerAttempt = attemptIndex < 0;
  const jokerParameterName = jokerParameterNames[jokerParameterNames.length + attemptIndex];
@@ -5857,9 +5868,7 @@
  return ''; // <- Note: Np knowledge present, return empty string
  }
  try {
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(tools.llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(tools.llm);
  const taskEmbeddingPrompt = {
  title: 'Knowledge Search',
  modelRequirements: {
@@ -6460,13 +6469,13 @@
  // Calculate and update tldr based on pipeline progress
  const cv = newOngoingResult;
  // Calculate progress based on parameters resolved vs total parameters
- const totalParameters = pipeline.parameters.filter(p => !p.isInput).length;
+ const totalParameters = pipeline.parameters.filter((p) => !p.isInput).length;
  let resolvedParameters = 0;
  let currentTaskTitle = '';
  // Get the resolved parameters from output parameters
  if (cv === null || cv === void 0 ? void 0 : cv.outputParameters) {
  // Count how many output parameters have non-empty values
- resolvedParameters = Object.values(cv.outputParameters).filter(value => value !== undefined && value !== null && String(value).trim() !== '').length;
+ resolvedParameters = Object.values(cv.outputParameters).filter((value) => value !== undefined && value !== null && String(value).trim() !== '').length;
  }
  // Try to determine current task from execution report
  if (((_a = cv === null || cv === void 0 ? void 0 : cv.executionReport) === null || _a === void 0 ? void 0 : _a.promptExecutions) && cv.executionReport.promptExecutions.length > 0) {
@@ -6576,9 +6585,7 @@
  throw new MissingToolsError('LLM tools are required for scraping external files');
  // <- Note: This scraper is used in all other scrapers, so saying "external files" not "markdown files"
  }
- // TODO: [🚐] Make arrayable LLMs -> single LLM DRY
- const _llms = arrayableToArray(llm);
- const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
+ const llmTools = getSingleLlmExecutionTools(llm);
  // TODO: [🌼] In future use `ptbk make` and made getPipelineCollection
  const collection = createCollectionFromJson(...PipelineCollection);
  const prepareKnowledgeFromMarkdownExecutor = createPipelineExecutor({