@promptbook/core 0.105.0-0 → 0.105.0-3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/esm/index.es.js +960 -337
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/servers.d.ts +6 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +10 -3
  7. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +11 -1
  8. package/esm/typings/src/book-2.0/agent-source/communication-samples.test.d.ts +1 -0
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.blocks.test.d.ts +1 -0
  10. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.import.test.d.ts +1 -0
  11. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.import.test.d.ts +1 -0
  12. package/esm/typings/src/book-2.0/agent-source/parseAgentSourceWithCommitments.blocks.test.d.ts +1 -0
  13. package/esm/typings/src/commitments/USE_TIME/USE_TIME.d.ts +40 -0
  14. package/esm/typings/src/commitments/USE_TIME/USE_TIME.test.d.ts +1 -0
  15. package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +8 -0
  16. package/esm/typings/src/commitments/_base/CommitmentDefinition.d.ts +8 -0
  17. package/esm/typings/src/commitments/index.d.ts +11 -2
  18. package/esm/typings/src/config.d.ts +1 -0
  19. package/esm/typings/src/import-plugins/$fileImportPlugins.d.ts +7 -0
  20. package/esm/typings/src/import-plugins/AgentFileImportPlugin.d.ts +7 -0
  21. package/esm/typings/src/import-plugins/FileImportPlugin.d.ts +24 -0
  22. package/esm/typings/src/import-plugins/JsonFileImportPlugin.d.ts +7 -0
  23. package/esm/typings/src/import-plugins/TextFileImportPlugin.d.ts +7 -0
  24. package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +2 -1
  25. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/countUsage.d.ts +2 -2
  26. package/esm/typings/src/llm-providers/agent/Agent.d.ts +9 -2
  27. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +3 -1
  28. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +10 -0
  29. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  30. package/esm/typings/src/scripting/javascript/JavascriptExecutionToolsOptions.d.ts +6 -1
  31. package/esm/typings/src/types/ModelRequirements.d.ts +6 -12
  32. package/esm/typings/src/utils/execCommand/$execCommandNormalizeOptions.d.ts +2 -3
  33. package/esm/typings/src/utils/execCommand/ExecCommandOptions.d.ts +7 -1
  34. package/esm/typings/src/utils/organization/keepImported.d.ts +9 -0
  35. package/esm/typings/src/utils/organization/keepTypeImported.d.ts +0 -1
  36. package/esm/typings/src/utils/random/$generateBookBoilerplate.d.ts +4 -0
  37. package/esm/typings/src/utils/random/$randomAgentPersona.d.ts +2 -1
  38. package/esm/typings/src/utils/random/$randomAgentRule.d.ts +14 -0
  39. package/esm/typings/src/version.d.ts +1 -1
  40. package/package.json +1 -1
  41. package/umd/index.umd.js +960 -336
  42. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -27,7 +27,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.105.0-0';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.105.0-3';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -992,6 +992,7 @@ const PROMPTBOOK_SYNTAX_COLORS = {
992
992
  SEPARATOR: Color.fromHex('#cccccc'),
993
993
  COMMITMENT: Color.fromHex('#DA0F78'),
994
994
  PARAMETER: Color.fromHex('#8e44ad'),
995
+ CODE_BLOCK: Color.fromHex('#7700ffff'),
995
996
  };
996
997
  // <- TODO: [🧠][🈵] Using `Color` here increases the package size approx 3kb, maybe remove it
997
998
  /**
@@ -3649,74 +3650,90 @@ function addUsage(...usageItems) {
3649
3650
  * in real-time through an observable.
3650
3651
  *
3651
3652
  * @param llmTools - The LLM tools to be intercepted and tracked
3652
- * @returns An augmented version of the tools that includes usage tracking capabilities
3653
+ * @returns Full proxy of the tools with added usage tracking capabilities
3653
3654
  * @public exported from `@promptbook/core`
3654
3655
  */
3655
3656
  function countUsage(llmTools) {
3656
3657
  let totalUsage = ZERO_USAGE;
3657
3658
  const spending = new Subject();
3658
- const proxyTools = {
3659
- get title() {
3660
- return `${llmTools.title} (+usage)`;
3661
- // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
3662
- // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
3663
- },
3664
- get description() {
3665
- return `${llmTools.description} (+usage)`;
3666
- // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
3667
- // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
3668
- },
3669
- checkConfiguration() {
3670
- return /* not await */ llmTools.checkConfiguration();
3671
- },
3672
- listModels() {
3673
- return /* not await */ llmTools.listModels();
3674
- },
3675
- spending() {
3676
- return spending.asObservable();
3677
- },
3678
- getTotalUsage() {
3679
- // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
3680
- return totalUsage;
3659
+ // Create a Proxy to intercept all property access and ensure full proxying of all properties
3660
+ const proxyTools = new Proxy(llmTools, {
3661
+ get(target, prop, receiver) {
3662
+ // Handle title property
3663
+ if (prop === 'title') {
3664
+ return `${target.title} (+usage)`;
3665
+ // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
3666
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
3667
+ }
3668
+ // Handle description property
3669
+ if (prop === 'description') {
3670
+ return `${target.description} (+usage)`;
3671
+ // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
3672
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
3673
+ }
3674
+ // Handle spending method (new method added by this wrapper)
3675
+ if (prop === 'spending') {
3676
+ return () => {
3677
+ return spending.asObservable();
3678
+ };
3679
+ }
3680
+ // Handle getTotalUsage method (new method added by this wrapper)
3681
+ if (prop === 'getTotalUsage') {
3682
+ // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
3683
+ return () => {
3684
+ return totalUsage;
3685
+ };
3686
+ }
3687
+ // Handle callChatModel method with usage counting
3688
+ if (prop === 'callChatModel' && target.callChatModel !== undefined) {
3689
+ return async (prompt) => {
3690
+ // console.info('[🚕] callChatModel through countTotalUsage');
3691
+ const promptResult = await target.callChatModel(prompt);
3692
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3693
+ spending.next(promptResult.usage);
3694
+ return promptResult;
3695
+ };
3696
+ }
3697
+ // Handle callCompletionModel method with usage counting
3698
+ if (prop === 'callCompletionModel' && target.callCompletionModel !== undefined) {
3699
+ return async (prompt) => {
3700
+ // console.info('[🚕] callCompletionModel through countTotalUsage');
3701
+ const promptResult = await target.callCompletionModel(prompt);
3702
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3703
+ spending.next(promptResult.usage);
3704
+ return promptResult;
3705
+ };
3706
+ }
3707
+ // Handle callEmbeddingModel method with usage counting
3708
+ if (prop === 'callEmbeddingModel' && target.callEmbeddingModel !== undefined) {
3709
+ return async (prompt) => {
3710
+ // console.info('[🚕] callEmbeddingModel through countTotalUsage');
3711
+ const promptResult = await target.callEmbeddingModel(prompt);
3712
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3713
+ spending.next(promptResult.usage);
3714
+ return promptResult;
3715
+ };
3716
+ }
3717
+ // Handle callImageGenerationModel method with usage counting
3718
+ if (prop === 'callImageGenerationModel' && target.callImageGenerationModel !== undefined) {
3719
+ return async (prompt) => {
3720
+ // console.info('[🚕] callImageGenerationModel through countTotalUsage');
3721
+ const promptResult = await target.callImageGenerationModel(prompt);
3722
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3723
+ spending.next(promptResult.usage);
3724
+ return promptResult;
3725
+ };
3726
+ }
3727
+ // <- Note: [🤖]
3728
+ // For all other properties and methods, delegate to the original target
3729
+ const value = Reflect.get(target, prop, receiver);
3730
+ // If it's a function, bind it to the target to preserve context
3731
+ if (typeof value === 'function') {
3732
+ return value.bind(target);
3733
+ }
3734
+ return value;
3681
3735
  },
3682
- };
3683
- if (llmTools.callChatModel !== undefined) {
3684
- proxyTools.callChatModel = async (prompt) => {
3685
- // console.info('[🚕] callChatModel through countTotalUsage');
3686
- const promptResult = await llmTools.callChatModel(prompt);
3687
- totalUsage = addUsage(totalUsage, promptResult.usage);
3688
- spending.next(promptResult.usage);
3689
- return promptResult;
3690
- };
3691
- }
3692
- if (llmTools.callCompletionModel !== undefined) {
3693
- proxyTools.callCompletionModel = async (prompt) => {
3694
- // console.info('[🚕] callCompletionModel through countTotalUsage');
3695
- const promptResult = await llmTools.callCompletionModel(prompt);
3696
- totalUsage = addUsage(totalUsage, promptResult.usage);
3697
- spending.next(promptResult.usage);
3698
- return promptResult;
3699
- };
3700
- }
3701
- if (llmTools.callEmbeddingModel !== undefined) {
3702
- proxyTools.callEmbeddingModel = async (prompt) => {
3703
- // console.info('[🚕] callEmbeddingModel through countTotalUsage');
3704
- const promptResult = await llmTools.callEmbeddingModel(prompt);
3705
- totalUsage = addUsage(totalUsage, promptResult.usage);
3706
- spending.next(promptResult.usage);
3707
- return promptResult;
3708
- };
3709
- }
3710
- if (llmTools.callImageGenerationModel !== undefined) {
3711
- proxyTools.callImageGenerationModel = async (prompt) => {
3712
- // console.info('[🚕] callImageGenerationModel through countTotalUsage');
3713
- const promptResult = await llmTools.callImageGenerationModel(prompt);
3714
- totalUsage = addUsage(totalUsage, promptResult.usage);
3715
- spending.next(promptResult.usage);
3716
- return promptResult;
3717
- };
3718
- }
3719
- // <- Note: [🤖]
3736
+ });
3720
3737
  return proxyTools;
3721
3738
  }
3722
3739
  /**
@@ -7494,6 +7511,40 @@ async function preparePersona(personaDescription, tools, options) {
7494
7511
  * TODO: [🏢] Check validity of `temperature` in pipeline
7495
7512
  */
7496
7513
 
7514
+ /**
7515
+ * Creates an empty/basic agent model requirements object
7516
+ * This serves as the starting point for the reduce-like pattern
7517
+ * where each commitment applies its changes to build the final requirements
7518
+ *
7519
+ * @public exported from `@promptbook/core`
7520
+ */
7521
+ function createEmptyAgentModelRequirements() {
7522
+ return {
7523
+ systemMessage: '',
7524
+ // modelName: 'gpt-5',
7525
+ modelName: 'gemini-2.5-flash-lite',
7526
+ temperature: 0.7,
7527
+ topP: 0.9,
7528
+ topK: 50,
7529
+ };
7530
+ }
7531
+ /**
7532
+ * Creates a basic agent model requirements with just the agent name
7533
+ * This is used when we have an agent name but no commitments
7534
+ *
7535
+ * @public exported from `@promptbook/core`
7536
+ */
7537
+ function createBasicAgentModelRequirements(agentName) {
7538
+ const empty = createEmptyAgentModelRequirements();
7539
+ return {
7540
+ ...empty,
7541
+ systemMessage: `You are ${agentName || 'AI Agent'}`,
7542
+ };
7543
+ }
7544
+ /**
7545
+ * TODO: [🐤] Deduplicate `AgentModelRequirements` and `ModelRequirements` model requirements
7546
+ */
7547
+
7497
7548
  /**
7498
7549
  * Generates a regex pattern to match a specific commitment
7499
7550
  *
@@ -7610,6 +7661,14 @@ class BaseCommitmentDefinition {
7610
7661
  return this.appendToSystemMessage(requirements, commentSection);
7611
7662
  }
7612
7663
  }
7664
+ /**
7665
+ * Gets tool function implementations provided by this commitment
7666
+ *
7667
+ * When the `applyToAgentModelRequirements` adds tools to the requirements, this method should return the corresponding function definitions.
7668
+ */
7669
+ getToolFunctions() {
7670
+ return {};
7671
+ }
7613
7672
  }
7614
7673
 
7615
7674
  /**
@@ -8708,79 +8767,6 @@ class FromCommitmentDefinition extends BaseCommitmentDefinition {
8708
8767
  * Note: [💞] Ignore a discrepancy between file name and entity name
8709
8768
  */
8710
8769
 
8711
- /**
8712
- * IMPORT commitment definition
8713
- *
8714
- * The IMPORT commitment tells the agent to import content from another agent at the current location.
8715
- *
8716
- * Example usage in agent source:
8717
- *
8718
- * ```book
8719
- * IMPORT https://s6.ptbk.io/benjamin-white
8720
- * ```
8721
- *
8722
- * @private [🪔] Maybe export the commitments through some package
8723
- */
8724
- class ImportCommitmentDefinition extends BaseCommitmentDefinition {
8725
- constructor(type = 'IMPORT') {
8726
- super(type);
8727
- }
8728
- /**
8729
- * Short one-line description of IMPORT.
8730
- */
8731
- get description() {
8732
- return 'Import content from another agent.';
8733
- }
8734
- /**
8735
- * Icon for this commitment.
8736
- */
8737
- get icon() {
8738
- return '📥';
8739
- }
8740
- /**
8741
- * Markdown documentation for IMPORT commitment.
8742
- */
8743
- get documentation() {
8744
- return spaceTrim$1(`
8745
- # ${this.type}
8746
-
8747
- Imports content from another agent at the location of the commitment.
8748
-
8749
- ## Examples
8750
-
8751
- \`\`\`book
8752
- My AI Agent
8753
-
8754
- IMPORT https://s6.ptbk.io/benjamin-white
8755
- RULE Speak only in English.
8756
- \`\`\`
8757
- `);
8758
- }
8759
- applyToAgentModelRequirements(requirements, content) {
8760
- const trimmedContent = content.trim();
8761
- if (!trimmedContent) {
8762
- return requirements;
8763
- }
8764
- if (!isValidAgentUrl(trimmedContent)) {
8765
- throw new Error(spaceTrim$1((block) => `
8766
- Invalid agent URL in IMPORT commitment: "${trimmedContent}"
8767
-
8768
- \`\`\`book
8769
- ${block(content)}
8770
- \`\`\`
8771
- `));
8772
- }
8773
- const importedAgentUrl = trimmedContent;
8774
- return {
8775
- ...requirements,
8776
- importedAgentUrls: [...(requirements.importedAgentUrls || []), importedAgentUrl],
8777
- };
8778
- }
8779
- }
8780
- /**
8781
- * Note: [💞] Ignore a discrepancy between file name and entity name
8782
- */
8783
-
8784
8770
  /**
8785
8771
  * GOAL commitment definition
8786
8772
  *
@@ -8881,6 +8867,87 @@ class GoalCommitmentDefinition extends BaseCommitmentDefinition {
8881
8867
  * Note: [💞] Ignore a discrepancy between file name and entity name
8882
8868
  */
8883
8869
 
8870
+ /**
8871
+ * IMPORT commitment definition
8872
+ *
8873
+ * The IMPORT commitment tells the agent to import content from another agent at the current location.
8874
+ *
8875
+ * Example usage in agent source:
8876
+ *
8877
+ * ```book
8878
+ * IMPORT https://s6.ptbk.io/benjamin-white
8879
+ * ```
8880
+ *
8881
+ * @private [🪔] Maybe export the commitments through some package
8882
+ */
8883
+ class ImportCommitmentDefinition extends BaseCommitmentDefinition {
8884
+ constructor(type = 'IMPORT') {
8885
+ super(type);
8886
+ }
8887
+ /**
8888
+ * Short one-line description of IMPORT.
8889
+ */
8890
+ get description() {
8891
+ return 'Import content from another agent or a generic text file.';
8892
+ }
8893
+ /**
8894
+ * Icon for this commitment.
8895
+ */
8896
+ get icon() {
8897
+ return '📥';
8898
+ }
8899
+ /**
8900
+ * Markdown documentation for IMPORT commitment.
8901
+ */
8902
+ get documentation() {
8903
+ return spaceTrim$1(`
8904
+ # ${this.type}
8905
+
8906
+ Imports content from another agent or a generic text file at the location of the commitment.
8907
+
8908
+ ## Examples
8909
+
8910
+ \`\`\`book
8911
+ My AI Agent
8912
+
8913
+ IMPORT https://s6.ptbk.io/benjamin-white
8914
+ IMPORT https://example.com/some-text-file.txt
8915
+ IMPORT ./path/to/local-file.json
8916
+ RULE Speak only in English.
8917
+ \`\`\`
8918
+ `);
8919
+ }
8920
+ applyToAgentModelRequirements(requirements, content) {
8921
+ const trimmedContent = content.trim();
8922
+ if (!trimmedContent) {
8923
+ return requirements;
8924
+ }
8925
+ if (isValidAgentUrl(trimmedContent)) {
8926
+ const importedAgentUrl = trimmedContent;
8927
+ return {
8928
+ ...requirements,
8929
+ importedAgentUrls: [...(requirements.importedAgentUrls || []), importedAgentUrl],
8930
+ };
8931
+ }
8932
+ if (isValidUrl(trimmedContent) || isValidFilePath(trimmedContent)) {
8933
+ return {
8934
+ ...requirements,
8935
+ importedFileUrls: [...(requirements.importedFileUrls || []), trimmedContent],
8936
+ };
8937
+ }
8938
+ throw new Error(spaceTrim$1((block) => `
8939
+ Invalid agent URL or file path in IMPORT commitment: "${trimmedContent}"
8940
+
8941
+ \`\`\`book
8942
+ ${block(content)}
8943
+ \`\`\`
8944
+ `));
8945
+ }
8946
+ }
8947
+ /**
8948
+ * Note: [💞] Ignore a discrepancy between file name and entity name
8949
+ */
8950
+
8884
8951
  /**
8885
8952
  * KNOWLEDGE commitment definition
8886
8953
  *
@@ -9300,7 +9367,13 @@ class InitialMessageCommitmentDefinition extends BaseCommitmentDefinition {
9300
9367
  `);
9301
9368
  }
9302
9369
  applyToAgentModelRequirements(requirements, content) {
9303
- return requirements;
9370
+ // INITIAL MESSAGE is for UI display purposes and for conversation history construction.
9371
+ const newSample = { question: null, answer: content };
9372
+ const newSamples = [...(requirements.samples || []), newSample];
9373
+ return {
9374
+ ...requirements,
9375
+ samples: newSamples,
9376
+ };
9304
9377
  }
9305
9378
  }
9306
9379
 
@@ -10332,27 +10405,16 @@ class NoteCommitmentDefinition extends BaseCommitmentDefinition {
10332
10405
  `);
10333
10406
  }
10334
10407
  applyToAgentModelRequirements(requirements, content) {
10335
- var _a;
10336
10408
  // The NOTE commitment makes no changes to the system message or model requirements
10337
10409
  // It only stores the note content in metadata for documentation purposes
10338
- const trimmedContent = content.trim();
10339
- if (!trimmedContent) {
10410
+ const trimmedContent = spaceTrim$1(content);
10411
+ if (trimmedContent === '') {
10340
10412
  return requirements;
10341
10413
  }
10342
- // Get existing note content from metadata
10343
- const existingNoteContent = ((_a = requirements.metadata) === null || _a === void 0 ? void 0 : _a.NOTE) || '';
10344
- // Merge the new content with existing note content
10345
- // When multiple NOTE commitments exist, they are aggregated together
10346
- const mergedNoteContent = existingNoteContent ? `${existingNoteContent}\n${trimmedContent}` : trimmedContent;
10347
- // Store the merged note content in metadata for debugging and inspection
10348
- const updatedMetadata = {
10349
- ...requirements.metadata,
10350
- NOTE: mergedNoteContent,
10351
- };
10352
- // Return requirements with updated metadata but no changes to system message
10414
+ // Return requirements with updated notes but no changes to system message
10353
10415
  return {
10354
10416
  ...requirements,
10355
- metadata: updatedMetadata,
10417
+ notes: [...(requirements.notes || []), trimmedContent],
10356
10418
  };
10357
10419
  }
10358
10420
  }
@@ -11371,6 +11433,104 @@ class UseSearchEngineCommitmentDefinition extends BaseCommitmentDefinition {
11371
11433
  * Note: [💞] Ignore a discrepancy between file name and entity name
11372
11434
  */
11373
11435
 
11436
+ /**
11437
+ * USE TIME commitment definition
11438
+ *
11439
+ * The `USE TIME` commitment indicates that the agent should be able to determine the current date and time.
11440
+ *
11441
+ * Example usage in agent source:
11442
+ *
11443
+ * ```book
11444
+ * USE TIME
11445
+ * ```
11446
+ *
11447
+ * @private [🪔] Maybe export the commitments through some package
11448
+ */
11449
+ class UseTimeCommitmentDefinition extends BaseCommitmentDefinition {
11450
+ constructor() {
11451
+ super('USE TIME', ['CURRENT TIME', 'TIME', 'DATE']);
11452
+ }
11453
+ /**
11454
+ * Short one-line description of USE TIME.
11455
+ */
11456
+ get description() {
11457
+ return 'Enable the agent to determine the current date and time.';
11458
+ }
11459
+ /**
11460
+ * Icon for this commitment.
11461
+ */
11462
+ get icon() {
11463
+ return '🕒';
11464
+ }
11465
+ /**
11466
+ * Markdown documentation for USE TIME commitment.
11467
+ */
11468
+ get documentation() {
11469
+ return spaceTrim$1(`
11470
+ # USE TIME
11471
+
11472
+ Enables the agent to determine the current date and time.
11473
+
11474
+ ## Key aspects
11475
+
11476
+ - This tool won't receive any input.
11477
+ - It outputs the current date and time as an ISO 8601 string.
11478
+ - Allows the agent to answer questions about the current time or date.
11479
+
11480
+ ## Examples
11481
+
11482
+ \`\`\`book
11483
+ Time-aware Assistant
11484
+
11485
+ PERSONA You are a helpful assistant who knows the current time.
11486
+ USE TIME
11487
+ \`\`\`
11488
+ `);
11489
+ }
11490
+ applyToAgentModelRequirements(requirements, content) {
11491
+ // Get existing tools array or create new one
11492
+ const existingTools = requirements.tools || [];
11493
+ // Add 'get_current_time' to tools if not already present
11494
+ const updatedTools = existingTools.some((tool) => tool.name === 'get_current_time')
11495
+ ? existingTools
11496
+ : [
11497
+ ...existingTools,
11498
+ {
11499
+ name: 'get_current_time',
11500
+ description: 'Get the current date and time in ISO 8601 format.',
11501
+ parameters: {
11502
+ type: 'object',
11503
+ properties: {},
11504
+ required: [],
11505
+ },
11506
+ },
11507
+ // <- TODO: !!!! define the function in LLM tools
11508
+ ];
11509
+ // Return requirements with updated tools and metadata
11510
+ return {
11511
+ ...requirements,
11512
+ tools: updatedTools,
11513
+ metadata: {
11514
+ ...requirements.metadata,
11515
+ },
11516
+ };
11517
+ }
11518
+ /**
11519
+ * Gets the `get_current_time` tool function implementation.
11520
+ */
11521
+ getToolFunctions() {
11522
+ return {
11523
+ async get_current_time() {
11524
+ console.log('!!!! [Tool] get_current_time called');
11525
+ return new Date().toISOString();
11526
+ },
11527
+ };
11528
+ }
11529
+ }
11530
+ /**
11531
+ * Note: [💞] Ignore a discrepancy between file name and entity name
11532
+ */
11533
+
11374
11534
  /**
11375
11535
  * Placeholder commitment definition for commitments that are not yet implemented
11376
11536
  *
@@ -11438,7 +11598,6 @@ class NotYetImplementedCommitmentDefinition extends BaseCommitmentDefinition {
11438
11598
  }
11439
11599
  }
11440
11600
 
11441
- // Import all commitment definition classes
11442
11601
  /**
11443
11602
  * Registry of all available commitment definitions
11444
11603
  * This array contains instances of all commitment definitions
@@ -11455,10 +11614,10 @@ const COMMITMENT_REGISTRY = [
11455
11614
  new MemoryCommitmentDefinition('MEMORIES'),
11456
11615
  new StyleCommitmentDefinition('STYLE'),
11457
11616
  new StyleCommitmentDefinition('STYLES'),
11458
- new RuleCommitmentDefinition('RULE'),
11459
11617
  new RuleCommitmentDefinition('RULES'),
11460
- new LanguageCommitmentDefinition('LANGUAGE'),
11618
+ new RuleCommitmentDefinition('RULE'),
11461
11619
  new LanguageCommitmentDefinition('LANGUAGES'),
11620
+ new LanguageCommitmentDefinition('LANGUAGE'),
11462
11621
  new SampleCommitmentDefinition('SAMPLE'),
11463
11622
  new SampleCommitmentDefinition('EXAMPLE'),
11464
11623
  new FormatCommitmentDefinition('FORMAT'),
@@ -11498,6 +11657,7 @@ const COMMITMENT_REGISTRY = [
11498
11657
  new ClosedCommitmentDefinition(),
11499
11658
  new UseBrowserCommitmentDefinition(),
11500
11659
  new UseSearchEngineCommitmentDefinition(),
11660
+ new UseTimeCommitmentDefinition(),
11501
11661
  new UseMcpCommitmentDefinition(),
11502
11662
  new UseCommitmentDefinition(),
11503
11663
  // Not yet implemented commitments (using placeholder)
@@ -11586,42 +11746,23 @@ function getGroupedCommitmentDefinitions() {
11586
11746
  return $deepFreeze(groupedCommitments);
11587
11747
  }
11588
11748
  /**
11589
- * TODO: [🧠] Maybe create through standardized $register
11590
- * Note: [💞] Ignore a discrepancy between file name and entity name
11591
- */
11592
-
11593
- /**
11594
- * Creates an empty/basic agent model requirements object
11595
- * This serves as the starting point for the reduce-like pattern
11596
- * where each commitment applies its changes to build the final requirements
11597
- *
11598
- * @public exported from `@promptbook/core`
11599
- */
11600
- function createEmptyAgentModelRequirements() {
11601
- return {
11602
- systemMessage: '',
11603
- // modelName: 'gpt-5',
11604
- modelName: 'gemini-2.5-flash-lite',
11605
- temperature: 0.7,
11606
- topP: 0.9,
11607
- topK: 50,
11608
- };
11609
- }
11610
- /**
11611
- * Creates a basic agent model requirements with just the agent name
11612
- * This is used when we have an agent name but no commitments
11749
+ * Gets all function implementations provided by all commitments
11613
11750
  *
11614
11751
  * @public exported from `@promptbook/core`
11615
11752
  */
11616
- function createBasicAgentModelRequirements(agentName) {
11617
- const empty = createEmptyAgentModelRequirements();
11618
- return {
11619
- ...empty,
11620
- systemMessage: `You are ${agentName || 'AI Agent'}`,
11621
- };
11753
+ function getAllCommitmentsToolFunctions() {
11754
+ const allToolFunctions = {};
11755
+ for (const commitmentDefinition of getAllCommitmentDefinitions()) {
11756
+ const toolFunctions = commitmentDefinition.getToolFunctions();
11757
+ for (const [funcName, funcImpl] of Object.entries(toolFunctions)) {
11758
+ allToolFunctions[funcName] = funcImpl;
11759
+ }
11760
+ }
11761
+ return allToolFunctions;
11622
11762
  }
11623
11763
  /**
11624
- * TODO: [🐤] Deduplicate `AgentModelRequirements` and `ModelRequirements` model requirements
11764
+ * TODO: [🧠] Maybe create through standardized $register
11765
+ * Note: [💞] Ignore a discrepancy between file name and entity name
11625
11766
  */
11626
11767
 
11627
11768
  /**
@@ -11702,11 +11843,37 @@ function parseAgentSourceWithCommitments(agentSource) {
11702
11843
  let currentCommitment = null;
11703
11844
  // Process lines starting from after the agent name line
11704
11845
  const startIndex = agentNameLineIndex >= 0 ? agentNameLineIndex + 1 : 0;
11846
+ let isInsideCodeBlock = false;
11705
11847
  for (let i = startIndex; i < lines.length; i++) {
11706
11848
  const line = lines[i];
11707
11849
  if (line === undefined) {
11708
11850
  continue;
11709
11851
  }
11852
+ const trimmedLine = line.trim();
11853
+ // Check if this line starts or ends a code block
11854
+ if (trimmedLine.startsWith('```')) {
11855
+ isInsideCodeBlock = !isInsideCodeBlock;
11856
+ if (currentCommitment) {
11857
+ // If we are inside a commitment, the code block is part of it
11858
+ currentCommitment.contentLines.push(line);
11859
+ }
11860
+ else {
11861
+ // If we are not inside a commitment, the code block is non-commitment
11862
+ nonCommitmentLines.push(line);
11863
+ }
11864
+ continue;
11865
+ }
11866
+ if (isInsideCodeBlock) {
11867
+ if (currentCommitment) {
11868
+ // If we are inside a commitment and a code block, the line is part of the commitment
11869
+ currentCommitment.contentLines.push(line);
11870
+ }
11871
+ else {
11872
+ // If we are inside a code block but not a commitment, the line is non-commitment
11873
+ nonCommitmentLines.push(line);
11874
+ }
11875
+ continue;
11876
+ }
11710
11877
  // Check if this line starts a new commitment
11711
11878
  let foundNewCommitment = false;
11712
11879
  for (const definition of COMMITMENT_REGISTRY) {
@@ -11784,6 +11951,97 @@ function parseAgentSourceWithCommitments(agentSource) {
11784
11951
  };
11785
11952
  }
11786
11953
 
11954
+ /**
11955
+ * Plugin for importing agent books *(`.book` files)*
11956
+ *
11957
+ * @private [🥝] Maybe export the import plugins through some package
11958
+ */
11959
+ const AgentFileImportPlugin = {
11960
+ name: 'agent-file-import-plugin',
11961
+ canImport(mimeType) {
11962
+ // [🧠] Should we have a specific MIME type for agent books?
11963
+ // For now, let's assume it's identified by .book extension or certain MIME types if provided
11964
+ return mimeType === 'text/x-promptbook' || mimeType === 'application/x-promptbook';
11965
+ },
11966
+ import(content) {
11967
+ const parseResult = parseAgentSourceWithCommitments(content);
11968
+ // Bring only the agent corpus (non-commitment lines and relevant commitments)
11969
+ // Stripping the agent name (which is usually the first line)
11970
+ const corpus = parseResult.nonCommitmentLines
11971
+ .filter((line, index) => index > 0 || !parseResult.agentName)
11972
+ .join('\n')
11973
+ .trim();
11974
+ // Also include relevant commitments that make up the "corpus" of the agent
11975
+ // For example PERSONA, RULE, KNOWLEDGE
11976
+ const relevantCommitments = parseResult.commitments
11977
+ .filter((c) => ['PERSONA', 'RULE', 'KNOWLEDGE'].includes(c.type))
11978
+ .map((c) => `${c.type} ${c.content}`)
11979
+ .join('\n\n');
11980
+ return spaceTrim$1((block) => `
11981
+ ${block(relevantCommitments)}
11982
+
11983
+ ${block(corpus)}
11984
+ `).trim();
11985
+ },
11986
+ };
11987
+
11988
+ /**
11989
+ * Plugin for importing JSON files
11990
+ *
11991
+ * @private [🥝] Maybe export the import plugins through some package
11992
+ */
11993
+ const JsonFileImportPlugin = {
11994
+ name: 'json-file-import-plugin',
11995
+ canImport(mimeType) {
11996
+ return mimeType === 'application/json' || mimeType.endsWith('+json');
11997
+ },
11998
+ import(content) {
11999
+ try {
12000
+ const json = JSON.parse(content);
12001
+ const formattedJson = JSON.stringify(json, null, 4);
12002
+ return `\`\`\`json\n${formattedJson}\n\`\`\``;
12003
+ }
12004
+ catch (error) {
12005
+ // If JSON is invalid, still import it but maybe not as pretty JSON
12006
+ return `\`\`\`json\n${content}\n\`\`\``;
12007
+ }
12008
+ },
12009
+ };
12010
+
12011
+ /**
12012
+ * Plugin for importing generic text files
12013
+ *
12014
+ * @private [🥝] Maybe export the import plugins through some package
12015
+ */
12016
+ const TextFileImportPlugin = {
12017
+ name: 'text-file-import-plugin',
12018
+ canImport(mimeType) {
12019
+ return (mimeType === 'text/plain' ||
12020
+ mimeType === 'text/markdown' ||
12021
+ mimeType === 'text/x-typescript' ||
12022
+ mimeType === 'text/javascript' ||
12023
+ mimeType === 'text/css' ||
12024
+ mimeType === 'text/html' ||
12025
+ mimeType.startsWith('text/'));
12026
+ },
12027
+ import(content, mimeType) {
12028
+ const extension = mimeTypeToExtension(mimeType);
12029
+ const codeBlockType = extension || 'txt';
12030
+ return `\`\`\`${codeBlockType}\n${content}\n\`\`\``;
12031
+ },
12032
+ };
12033
+
12034
+ /**
12035
+ * All available file import plugins
12036
+ *
12037
+ * @private [🥝] Maybe export the import plugins through some package
12038
+ */
12039
+ const $fileImportPlugins = [
12040
+ AgentFileImportPlugin,
12041
+ JsonFileImportPlugin,
12042
+ TextFileImportPlugin,
12043
+ ];
12044
+
11787
12045
  /**
11788
12046
  * Parses parameters from text using both supported notations:
11789
12047
  * 1. @Parameter - single word parameter starting with @
@@ -11873,6 +12131,7 @@ function removeCommentsFromSystemMessage(systemMessage) {
11873
12131
  * @public exported from `@promptbook/core`
11874
12132
  */
11875
12133
  async function createAgentModelRequirementsWithCommitments(agentSource, modelName) {
12134
+ var _a;
11876
12135
  // Parse the agent source to extract commitments
11877
12136
  const parseResult = parseAgentSourceWithCommitments(agentSource);
11878
12137
  // Apply DELETE filtering: remove prior commitments tagged by parameters targeted by DELETE/CANCEL/DISCARD/REMOVE
@@ -11939,6 +12198,64 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
11939
12198
  }
11940
12199
  }
11941
12200
  }
12201
+ // Handle IMPORT commitments for generic files
12202
+ // Note: This logic could be moved to ImportCommitmentDefinition, but it needs to be asynchronous
12203
+ if (requirements.importedFileUrls && requirements.importedFileUrls.length > 0) {
12204
+ for (const fileUrl of requirements.importedFileUrls) {
12205
+ try {
12206
+ // 1. Mocked security check
12207
+ await mockedSecurityCheck(fileUrl);
12208
+ // 2. Fetch file content
12209
+ let content;
12210
+ let mimeType = null;
12211
+ if (isValidUrl(fileUrl)) {
12212
+ const response = await promptbookFetch(fileUrl);
12213
+ if (!response.ok) {
12214
+ throw new Error(`Failed to fetch ${fileUrl}: ${response.statusText}`);
12215
+ }
12216
+ content = await response.text();
12217
+ mimeType = response.headers.get('Content-Type');
12218
+ /*
12219
+ TODO: !!!! Commented out this case because we need to work in Browser-compatible mode in many packages, use passed `fs` instead
12220
+ } else if (isValidFilePath(fileUrl)) {
12221
+ // [x🟢x] This code is expected to run in Node environment if local files are used
12222
+ const fs = await import('fs/promises');
12223
+ content = await fs.readFile(fileUrl, 'utf-8');
12224
+ const extension = getFileExtension(fileUrl);
12225
+ mimeType = extensionToMimeType(extension as string);
12226
+ */
12227
+ }
12228
+ else {
12229
+ throw new Error(`Invalid file URL or path: ${fileUrl}`);
12230
+ }
12231
+ if (!mimeType) {
12232
+ mimeType = 'text/plain';
12233
+ }
12234
+ // Remove charset from mime type
12235
+ mimeType = mimeType.split(';')[0].trim();
12236
+ // 3. Prevent importing binary files (mocked check)
12237
+ if (isBinaryMimeType(mimeType)) {
12238
+ throw new Error(`Importing binary files is not allowed: ${mimeType}`);
12239
+ }
12240
+ // 4. Find appropriate plugin
12241
+ const plugin = $fileImportPlugins.find((p) => p.canImport(mimeType));
12242
+ if (!plugin) {
12243
+ throw new Error(`No import plugin found for MIME type: ${mimeType}`);
12244
+ }
12245
+ // 5. Process content
12246
+ const importedContent = await plugin.import(content, mimeType);
12247
+ // 6. Append to system message
12248
+ requirements = {
12249
+ ...requirements,
12250
+ systemMessage: requirements.systemMessage + '\n\n' + importedContent,
12251
+ };
12252
+ }
12253
+ catch (error) {
12254
+ console.warn(`Failed to import file ${fileUrl}:`, error);
12255
+ // Continue with other imports even if one fails
12256
+ }
12257
+ }
12258
+ }
11942
12259
  // Handle MCP servers (extract from original agent source)
11943
12260
  const mcpServers = extractMcpServers(agentSource);
11944
12261
  if (mcpServers.length > 0) {
@@ -11948,9 +12265,11 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
11948
12265
  };
11949
12266
  }
11950
12267
  // Add non-commitment lines to system message if they exist
12268
+ // Note: Filtering out horizontal lines (---) as requested
11951
12269
  const nonCommitmentContent = parseResult.nonCommitmentLines
11952
12270
  .filter((line, index) => index > 0 || !parseResult.agentName) // Skip first line if it's the agent name
11953
12271
  .filter((line) => line.trim()) // Remove empty lines
12272
+ .filter((line) => !/^[\s]*[-_*][\s]*[-_*][\s]*[-_*][\s]*[-_*]*[\s]*$/.test(line)) // Remove horizontal lines
11954
12273
  .join('\n')
11955
12274
  .trim();
11956
12275
  if (nonCommitmentContent) {
@@ -11959,6 +12278,26 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
11959
12278
  systemMessage: requirements.systemMessage + '\n\n' + nonCommitmentContent,
11960
12279
  };
11961
12280
  }
12281
+ // Add example interactions to the system message
12282
+ const examples = [];
12283
+ // 1. Initial message as an example agent response
12284
+ const initialMessage = (_a = parseResult.commitments.find((c) => c.type === 'INITIAL MESSAGE')) === null || _a === void 0 ? void 0 : _a.content;
12285
+ if (initialMessage) {
12286
+ examples.push(`Agent: ${initialMessage}`);
12287
+ }
12288
+ // 2. User and Agent message pairs
12289
+ if (requirements.samples && requirements.samples.length > 0) {
12290
+ for (const sample of requirements.samples) {
12291
+ examples.push(`User: ${sample.question}\nAgent: ${sample.answer}`);
12292
+ }
12293
+ }
12294
+ if (examples.length > 0) {
12295
+ const exampleInteractionsContent = `Example interaction:\n\n${examples.join('\n\n')}`;
12296
+ requirements = {
12297
+ ...requirements,
12298
+ systemMessage: requirements.systemMessage + '\n\n' + exampleInteractionsContent,
12299
+ };
12300
+ }
11962
12301
  // Remove comment lines (lines starting with #) from the final system message
11963
12302
  // while preserving the original content with comments in metadata
11964
12303
  const cleanedSystemMessage = removeCommentsFromSystemMessage(requirements.systemMessage);
@@ -11967,6 +12306,36 @@ async function createAgentModelRequirementsWithCommitments(agentSource, modelNam
11967
12306
  systemMessage: cleanedSystemMessage,
11968
12307
  };
11969
12308
  }
12309
/**
 * Mocked security check for imported files
 *
 * @param urlOrPath - The URL or local path of the file to check
 * @returns A promise that resolves if the file is safe
 */
async function mockedSecurityCheck(urlOrPath) {
    // TODO: Implement proper security checks
    // Simulate the latency of a real (future) asynchronous security scan
    await new Promise((resolve) => {
        setTimeout(resolve, 10);
    });
    if (!urlOrPath.includes('malicious')) {
        return;
    }
    throw new Error(`Security check failed for: ${urlOrPath}`);
}
12322
/**
 * Checks if the given MIME type belongs to a binary file
 *
 * @param mimeType - The MIME type to check
 * @returns True if it's a binary MIME type
 */
function isBinaryMimeType(mimeType) {
    // Media-type families plus a few well-known binary application formats
    const binaryPrefixes = [
        'image/',
        'video/',
        'audio/',
        'application/octet-stream',
        'application/pdf',
        'application/zip',
    ];
    for (const binaryPrefix of binaryPrefixes) {
        if (mimeType.startsWith(binaryPrefix)) {
            return true;
        }
    }
    return false;
}
11970
12339
 
11971
12340
  /**
11972
12341
  * Normalizes agent name from arbitrary string to valid agent name
@@ -12027,7 +12396,24 @@ function parseAgentSource(agentSource) {
12027
12396
  const meta = {};
12028
12397
  const links = [];
12029
12398
  const capabilities = [];
12399
+ const samples = [];
12400
+ let pendingUserMessage = null;
12030
12401
  for (const commitment of parseResult.commitments) {
12402
+ if (commitment.type === 'INITIAL MESSAGE') {
12403
+ samples.push({ question: null, answer: commitment.content });
12404
+ continue;
12405
+ }
12406
+ if (commitment.type === 'USER MESSAGE') {
12407
+ pendingUserMessage = commitment.content;
12408
+ continue;
12409
+ }
12410
+ if (commitment.type === 'AGENT MESSAGE') {
12411
+ if (pendingUserMessage !== null) {
12412
+ samples.push({ question: pendingUserMessage, answer: commitment.content });
12413
+ pendingUserMessage = null;
12414
+ }
12415
+ continue;
12416
+ }
12031
12417
  if (commitment.type === 'USE BROWSER') {
12032
12418
  capabilities.push({
12033
12419
  type: 'browser',
@@ -12044,6 +12430,37 @@ function parseAgentSource(agentSource) {
12044
12430
  });
12045
12431
  continue;
12046
12432
  }
12433
+ if (commitment.type === 'USE TIME') {
12434
+ capabilities.push({
12435
+ type: 'time',
12436
+ label: 'Time',
12437
+ iconName: 'Clock',
12438
+ });
12439
+ continue;
12440
+ }
12441
+ if (commitment.type === 'IMPORT') {
12442
+ const content = spaceTrim$2(commitment.content).split('\n')[0] || '';
12443
+ let label = content;
12444
+ const iconName = 'Download';
12445
+ try {
12446
+ if (content.startsWith('http://') || content.startsWith('https://')) {
12447
+ const url = new URL(content);
12448
+ label = url.hostname.replace(/^www\./, '') + '.../' + url.pathname.split('/').pop();
12449
+ }
12450
+ else if (content.startsWith('./') || content.startsWith('../') || content.startsWith('/')) {
12451
+ label = content.split('/').pop() || content;
12452
+ }
12453
+ }
12454
+ catch (e) {
12455
+ // Invalid URL or path, keep default label
12456
+ }
12457
+ capabilities.push({
12458
+ type: 'knowledge',
12459
+ label,
12460
+ iconName,
12461
+ });
12462
+ continue;
12463
+ }
12047
12464
  if (commitment.type === 'KNOWLEDGE') {
12048
12465
  const content = spaceTrim$2(commitment.content).split('\n')[0] || '';
12049
12466
  let label = content;
@@ -12128,6 +12545,7 @@ function parseAgentSource(agentSource) {
12128
12545
  links,
12129
12546
  parameters,
12130
12547
  capabilities,
12548
+ samples,
12131
12549
  };
12132
12550
  }
12133
12551
  /**
@@ -16758,6 +17176,9 @@ function filterModels(llmTools, predicate) {
16758
17176
  return originalModels.filter(predicate);
16759
17177
  }
16760
17178
  },
17179
+ checkConfiguration() {
17180
+ return /* not await */ llmTools.checkConfiguration();
17181
+ },
16761
17182
  };
16762
17183
  // Helper function to validate if a model is allowed
16763
17184
  async function isModelAllowed(modelName) {
@@ -17064,31 +17485,14 @@ class MemoryStorage {
17064
17485
  * Intercepts LLM tools and counts total usage of the tools
17065
17486
  *
17066
17487
  * Note: It can take extended `LlmExecutionTools` and cache the
17488
+ * Note: Returns full proxy of all LLM tool properties and methods
17067
17489
  *
17068
17490
  * @param llmTools LLM tools to be intercepted with usage counting, it can contain extra methods like `totalUsage`
17069
- * @returns LLM tools with same functionality with added total cost counting
17491
+ * @returns Full proxy of LLM tools with same functionality with added caching
17070
17492
  * @public exported from `@promptbook/core`
17071
17493
  */
17072
17494
  function cacheLlmTools(llmTools, options = {}) {
17073
17495
  const { storage = new MemoryStorage(), isCacheReloaded = false, isVerbose = DEFAULT_IS_VERBOSE } = options;
17074
- const proxyTools = {
17075
- ...llmTools,
17076
- // <- Note: [🥫]
17077
- get title() {
17078
- return `${llmTools.title} (cached)`;
17079
- // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
17080
- // <- TODO: [🧈][🧠] Does it make sense to suffix "(cached)"?
17081
- },
17082
- get description() {
17083
- return `${llmTools.description} (cached)`;
17084
- // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
17085
- // <- TODO: [🧈][🧠] Does it make sense to suffix "(cached)"?
17086
- },
17087
- listModels() {
17088
- // TODO: [🧠] Should be model listing also cached?
17089
- return /* not await */ llmTools.listModels();
17090
- },
17091
- };
17092
17496
  const callCommonModel = async (prompt) => {
17093
17497
  var _a;
17094
17498
  const { parameters, content, modelRequirements } = prompt;
@@ -17211,27 +17615,55 @@ function cacheLlmTools(llmTools, options = {}) {
17211
17615
  }
17212
17616
  return promptResult;
17213
17617
  };
17214
- if (llmTools.callChatModel !== undefined) {
17215
- proxyTools.callChatModel = async (prompt) => {
17216
- return /* not await */ callCommonModel(prompt);
17217
- };
17218
- }
17219
- if (llmTools.callCompletionModel !== undefined) {
17220
- proxyTools.callCompletionModel = async (prompt) => {
17221
- return /* not await */ callCommonModel(prompt);
17222
- };
17223
- }
17224
- if (llmTools.callEmbeddingModel !== undefined) {
17225
- proxyTools.callEmbeddingModel = async (prompt) => {
17226
- return /* not await */ callCommonModel(prompt);
17227
- };
17228
- }
17229
- if (llmTools.callImageGenerationModel !== undefined) {
17230
- proxyTools.callImageGenerationModel = async (prompt) => {
17231
- return /* not await */ callCommonModel(prompt);
17232
- };
17233
- }
17234
- // <- Note: [🤖]
17618
+ // Create a Proxy to intercept all property access and ensure full proxying of all properties
17619
+ const proxyTools = new Proxy(llmTools, {
17620
+ get(target, prop, receiver) {
17621
+ // Handle title property
17622
+ if (prop === 'title') {
17623
+ return `${target.title} (cached)`;
17624
+ // <- TODO: [🧈] Maybe standardize the suffix when wrapping `LlmExecutionTools` up
17625
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(cached)"?
17626
+ }
17627
+ // Handle description property
17628
+ if (prop === 'description') {
17629
+ return `${target.description} (cached)`;
17630
+ // <- TODO: [🧈] Maybe standardize the suffix when wrapping `LlmExecutionTools` up
17631
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(cached)"?
17632
+ }
17633
+ // Handle callChatModel method
17634
+ if (prop === 'callChatModel' && target.callChatModel !== undefined) {
17635
+ return async (prompt) => {
17636
+ return /* not await */ callCommonModel(prompt);
17637
+ };
17638
+ }
17639
+ // Handle callCompletionModel method
17640
+ if (prop === 'callCompletionModel' && target.callCompletionModel !== undefined) {
17641
+ return async (prompt) => {
17642
+ return /* not await */ callCommonModel(prompt);
17643
+ };
17644
+ }
17645
+ // Handle callEmbeddingModel method
17646
+ if (prop === 'callEmbeddingModel' && target.callEmbeddingModel !== undefined) {
17647
+ return async (prompt) => {
17648
+ return /* not await */ callCommonModel(prompt);
17649
+ };
17650
+ }
17651
+ // Handle callImageGenerationModel method
17652
+ if (prop === 'callImageGenerationModel' && target.callImageGenerationModel !== undefined) {
17653
+ return async (prompt) => {
17654
+ return /* not await */ callCommonModel(prompt);
17655
+ };
17656
+ }
17657
+ // <- Note: [🤖]
17658
+ // For all other properties and methods, delegate to the original target
17659
+ const value = Reflect.get(target, prop, receiver);
17660
+ // If it's a function, bind it to the target to preserve context
17661
+ if (typeof value === 'function') {
17662
+ return value.bind(target);
17663
+ }
17664
+ return value;
17665
+ },
17666
+ });
17235
17667
  return proxyTools;
17236
17668
  }
17237
17669
  /**
@@ -19217,7 +19649,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19217
19649
  * Calls OpenAI API to use a chat model with streaming.
19218
19650
  */
19219
19651
  async callChatModelStream(prompt, onProgress) {
19220
- var _a, _b, _c;
19652
+ var _a, _b, _c, _d;
19221
19653
  if (this.options.isVerbose) {
19222
19654
  console.info('💬 OpenAI callChatModel call', { prompt });
19223
19655
  }
@@ -19272,6 +19704,131 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19272
19704
  }
19273
19705
  // Always add the current user message
19274
19706
  threadMessages.push({ role: 'user', content: rawPromptContent });
19707
+ // Check if tools are being used - if so, use non-streaming mode
19708
+ const hasTools = modelRequirements.tools !== undefined && modelRequirements.tools.length > 0;
19709
+ const start = $getCurrentDate();
19710
+ let complete;
19711
+ // [🐱‍🚀] When tools are present, we need to use the non-streaming Runs API
19712
+ // because streaming doesn't support tool execution flow properly
19713
+ if (hasTools) {
19714
+ const rawRequest = {
19715
+ assistant_id: this.assistantId,
19716
+ thread: {
19717
+ messages: threadMessages,
19718
+ },
19719
+ tools: mapToolsToOpenAi(modelRequirements.tools),
19720
+ };
19721
+ if (this.options.isVerbose) {
19722
+ console.info(colors.bgWhite('rawRequest (non-streaming with tools)'), JSON.stringify(rawRequest, null, 4));
19723
+ }
19724
+ // Create thread and run
19725
+ const threadAndRun = await client.beta.threads.createAndRun(rawRequest);
19726
+ let run = threadAndRun;
19727
+ // Poll until run completes or requires action
19728
+ while (run.status === 'queued' || run.status === 'in_progress' || run.status === 'requires_action') {
19729
+ if (run.status === 'requires_action' && ((_a = run.required_action) === null || _a === void 0 ? void 0 : _a.type) === 'submit_tool_outputs') {
19730
+ // Execute tools
19731
+ const toolCalls = run.required_action.submit_tool_outputs.tool_calls;
19732
+ const toolOutputs = [];
19733
+ for (const toolCall of toolCalls) {
19734
+ if (toolCall.type === 'function') {
19735
+ const functionName = toolCall.function.name;
19736
+ const functionArgs = JSON.parse(toolCall.function.arguments);
19737
+ if (this.options.isVerbose) {
19738
+ console.info(`🔧 Executing tool: ${functionName}`, functionArgs);
19739
+ }
19740
+ // Get execution tools for script execution
19741
+ const executionTools = this.options
19742
+ .executionTools;
19743
+ if (!executionTools || !executionTools.script) {
19744
+ throw new PipelineExecutionError(`Model requested tool '${functionName}' but no executionTools.script were provided in OpenAiAssistantExecutionTools options`);
19745
+ }
19746
+ // TODO: [DRY] Use some common tool caller (similar to OpenAiCompatibleExecutionTools)
19747
+ const scriptTools = Array.isArray(executionTools.script)
19748
+ ? executionTools.script
19749
+ : [executionTools.script];
19750
+ let functionResponse;
19751
+ try {
19752
+ const scriptTool = scriptTools[0]; // <- TODO: [🧠] Which script tool to use?
19753
+ functionResponse = await scriptTool.execute({
19754
+ scriptLanguage: 'javascript',
19755
+ script: `
19756
+ const args = ${JSON.stringify(functionArgs)};
19757
+ return await ${functionName}(args);
19758
+ `,
19759
+ parameters: {}, // <- TODO: [🧠] What parameters to pass?
19760
+ });
19761
+ if (this.options.isVerbose) {
19762
+ console.info(`✅ Tool ${functionName} executed:`, functionResponse);
19763
+ }
19764
+ }
19765
+ catch (error) {
19766
+ assertsError(error);
19767
+ functionResponse = spaceTrim$2((block) => `
19768
+
19769
+ The invoked tool \`${functionName}\` failed with error:
19770
+
19771
+ \`\`\`json
19772
+ ${block(JSON.stringify(serializeError(error), null, 4))}
19773
+ \`\`\`
19774
+
19775
+ `);
19776
+ console.error(colors.bgRed(`❌ Error executing tool ${functionName}:`));
19777
+ console.error(error);
19778
+ }
19779
+ toolOutputs.push({
19780
+ tool_call_id: toolCall.id,
19781
+ output: functionResponse,
19782
+ });
19783
+ }
19784
+ }
19785
+ // Submit tool outputs
19786
+ run = await client.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, {
19787
+ tool_outputs: toolOutputs,
19788
+ });
19789
+ }
19790
+ else {
19791
+ // Wait a bit before polling again
19792
+ await new Promise((resolve) => setTimeout(resolve, 500));
19793
+ run = await client.beta.threads.runs.retrieve(run.thread_id, run.id);
19794
+ }
19795
+ }
19796
+ if (run.status !== 'completed') {
19797
+ throw new PipelineExecutionError(`Assistant run failed with status: ${run.status}`);
19798
+ }
19799
+ // Get messages from the thread
19800
+ const messages = await client.beta.threads.messages.list(run.thread_id);
19801
+ const assistantMessages = messages.data.filter((msg) => msg.role === 'assistant');
19802
+ if (assistantMessages.length === 0) {
19803
+ throw new PipelineExecutionError('No assistant messages found after run completion');
19804
+ }
19805
+ const lastMessage = assistantMessages[0];
19806
+ const textContent = lastMessage.content.find((c) => c.type === 'text');
19807
+ if (!textContent || textContent.type !== 'text') {
19808
+ throw new PipelineExecutionError('No text content in assistant response');
19809
+ }
19810
+ complete = $getCurrentDate();
19811
+ const resultContent = textContent.text.value;
19812
+ const usage = UNCERTAIN_USAGE;
19813
+ // Progress callback with final result
19814
+ const finalChunk = {
19815
+ content: resultContent,
19816
+ modelName: 'assistant',
19817
+ timing: { start, complete },
19818
+ usage,
19819
+ rawPromptContent,
19820
+ rawRequest,
19821
+ rawResponse: { run, messages: messages.data },
19822
+ };
19823
+ onProgress(finalChunk);
19824
+ return exportJson({
19825
+ name: 'promptResult',
19826
+ message: `Result of \`OpenAiAssistantExecutionTools.callChatModelStream\` (with tools)`,
19827
+ order: [],
19828
+ value: finalChunk,
19829
+ });
19830
+ }
19831
+ // Streaming mode (without tools)
19275
19832
  const rawRequest = {
19276
19833
  // TODO: [👨‍👨‍👧‍👧] ...modelSettings,
19277
19834
  // TODO: [👨‍👨‍👧‍👧][🧠] What about system message for assistants, does it make sense - combination of OpenAI assistants with Promptbook Personas
@@ -19282,10 +19839,8 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19282
19839
  tools: modelRequirements.tools === undefined ? undefined : mapToolsToOpenAi(modelRequirements.tools),
19283
19840
  // <- TODO: Add user identification here> user: this.options.user,
19284
19841
  };
19285
- const start = $getCurrentDate();
19286
- let complete;
19287
19842
  if (this.options.isVerbose) {
19288
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
19843
+ console.info(colors.bgWhite('rawRequest (streaming)'), JSON.stringify(rawRequest, null, 4));
19289
19844
  }
19290
19845
  const stream = await client.beta.threads.createAndRunStream(rawRequest);
19291
19846
  stream.on('connect', () => {
@@ -19321,6 +19876,15 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19321
19876
  console.info('messageDone', message);
19322
19877
  }
19323
19878
  });
19879
+ // TODO: [🐱‍🚀] Handle tool calls in assistants
19880
+ // Note: OpenAI Assistant streaming with tool calls requires special handling.
19881
+ // The stream will pause when a tool call is needed, and we need to:
19882
+ // 1. Wait for the run to reach 'requires_action' status
19883
+ // 2. Execute the tool calls
19884
+ // 3. Submit tool outputs via a separate API call (not on the stream)
19885
+ // 4. Continue the run
19886
+ // This requires switching to non-streaming mode or using the Runs API directly.
19887
+ // For now, tools with assistants should use the non-streaming chat completions API instead.
19324
19888
  const rawResponse = await stream.finalMessages();
19325
19889
  if (this.options.isVerbose) {
19326
19890
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -19331,10 +19895,10 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19331
19895
  if (rawResponse[0].content.length !== 1) {
19332
19896
  throw new PipelineExecutionError(`There is NOT 1 BUT ${rawResponse[0].content.length} finalMessages content from OpenAI`);
19333
19897
  }
19334
- if (((_a = rawResponse[0].content[0]) === null || _a === void 0 ? void 0 : _a.type) !== 'text') {
19335
- throw new PipelineExecutionError(`There is NOT 'text' BUT ${(_b = rawResponse[0].content[0]) === null || _b === void 0 ? void 0 : _b.type} finalMessages content type from OpenAI`);
19898
+ if (((_b = rawResponse[0].content[0]) === null || _b === void 0 ? void 0 : _b.type) !== 'text') {
19899
+ throw new PipelineExecutionError(`There is NOT 'text' BUT ${(_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.type} finalMessages content type from OpenAI`);
19336
19900
  }
19337
- const resultContent = (_c = rawResponse[0].content[0]) === null || _c === void 0 ? void 0 : _c.text.value;
19901
+ const resultContent = (_d = rawResponse[0].content[0]) === null || _d === void 0 ? void 0 : _d.text.value;
19338
19902
  // <- TODO: [🧠] There are also annotations, maybe use them
19339
19903
  // eslint-disable-next-line prefer-const
19340
19904
  complete = $getCurrentDate();
@@ -19402,7 +19966,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19402
19966
  throw new NotAllowed(`Creating new assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
19403
19967
  }
19404
19968
  // await this.playground();
19405
- const { name, instructions, knowledgeSources } = options;
19969
+ const { name, instructions, knowledgeSources, tools } = options;
19406
19970
  const client = await this.getClient();
19407
19971
  let vectorStoreId;
19408
19972
  // If knowledge sources are provided, create a vector store with them
@@ -19473,7 +20037,11 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19473
20037
  description: 'Assistant created via Promptbook',
19474
20038
  model: 'gpt-4o',
19475
20039
  instructions,
19476
- tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
20040
+ tools: [
20041
+ /* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */
20042
+ { type: 'file_search' },
20043
+ ...(tools === undefined ? [] : mapToolsToOpenAi(tools)),
20044
+ ],
19477
20045
  };
19478
20046
  // Attach vector store if created
19479
20047
  if (vectorStoreId) {
@@ -19498,7 +20066,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19498
20066
  if (!this.isCreatingNewAssistantsAllowed) {
19499
20067
  throw new NotAllowed(`Updating assistants is not allowed. Set \`isCreatingNewAssistantsAllowed: true\` in options to enable this feature.`);
19500
20068
  }
19501
- const { assistantId, name, instructions, knowledgeSources } = options;
20069
+ const { assistantId, name, instructions, knowledgeSources, tools } = options;
19502
20070
  const client = await this.getClient();
19503
20071
  let vectorStoreId;
19504
20072
  // If knowledge sources are provided, create a vector store with them
@@ -19567,7 +20135,11 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19567
20135
  const assistantUpdate = {
19568
20136
  name,
19569
20137
  instructions,
19570
- tools: [/* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */ { type: 'file_search' }],
20138
+ tools: [
20139
+ /* TODO: [🧠] Maybe add { type: 'code_interpreter' }, */
20140
+ { type: 'file_search' },
20141
+ ...(tools === undefined ? [] : mapToolsToOpenAi(tools)),
20142
+ ],
19571
20143
  };
19572
20144
  if (vectorStoreId) {
19573
20145
  assistantUpdate.tool_resources = {
@@ -19608,6 +20180,7 @@ class OpenAiAssistantExecutionTools extends OpenAiExecutionTools {
19608
20180
  */
19609
20181
  const DISCRIMINANT = 'OPEN_AI_ASSISTANT_V1';
19610
20182
  /**
20183
+ * TODO: [🙎] In `OpenAiAssistantExecutionTools` Allow to create abstract assistants with `isCreatingNewAssistantsAllowed`
19611
20184
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
19612
20185
  * TODO: Maybe make custom OpenAiError
19613
20186
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
@@ -19666,8 +20239,10 @@ class AgentLlmExecutionTools {
19666
20239
  }
19667
20240
  /**
19668
20241
  * Get cached or create agent model requirements
20242
+ *
20243
+ * Note: [🐤] This is named `getModelRequirements` *(not `getAgentModelRequirements`)* because in the future these two will be united
19669
20244
  */
19670
- async getAgentModelRequirements() {
20245
+ async getModelRequirements() {
19671
20246
  if (this._cachedModelRequirements === null) {
19672
20247
  // Get available models from underlying LLM tools for best model selection
19673
20248
  const availableModels = await this.options.llmTools.listModels();
@@ -19737,9 +20312,25 @@ class AgentLlmExecutionTools {
19737
20312
  if (prompt.modelRequirements.modelVariant !== 'CHAT') {
19738
20313
  throw new Error('AgentLlmExecutionTools only supports chat prompts');
19739
20314
  }
19740
- const modelRequirements = await this.getAgentModelRequirements();
20315
+ const modelRequirements = await this.getModelRequirements();
19741
20316
  const chatPrompt = prompt;
19742
20317
  let underlyingLlmResult;
20318
+ // Create modified chat prompt with agent system message
20319
+ const promptWithAgentModelRequirements = {
20320
+ ...chatPrompt,
20321
+ modelRequirements: {
20322
+ ...chatPrompt.modelRequirements,
20323
+ ...modelRequirements,
20324
+ // Spread tools to convert readonly array to mutable
20325
+ tools: modelRequirements.tools ? [...modelRequirements.tools] : chatPrompt.modelRequirements.tools,
20326
+ // Prepend agent system message to existing system message
20327
+ systemMessage: modelRequirements.systemMessage +
20328
+ (chatPrompt.modelRequirements.systemMessage
20329
+ ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
20330
+ : ''),
20331
+ },
20332
+ };
20333
+ console.log('!!!! promptWithAgentModelRequirements:', promptWithAgentModelRequirements);
19743
20334
  if (OpenAiAssistantExecutionTools.isOpenAiAssistantExecutionTools(this.options.llmTools)) {
19744
20335
  const requirementsHash = SHA256(JSON.stringify(modelRequirements)).toString();
19745
20336
  const cached = AgentLlmExecutionTools.assistantCache.get(this.title);
@@ -19760,6 +20351,7 @@ class AgentLlmExecutionTools {
19760
20351
  name: this.title,
19761
20352
  instructions: modelRequirements.systemMessage,
19762
20353
  knowledgeSources: modelRequirements.knowledgeSources,
20354
+ tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
19763
20355
  });
19764
20356
  AgentLlmExecutionTools.assistantCache.set(this.title, {
19765
20357
  assistantId: assistant.assistantId,
@@ -19776,6 +20368,7 @@ class AgentLlmExecutionTools {
19776
20368
  name: this.title,
19777
20369
  instructions: modelRequirements.systemMessage,
19778
20370
  knowledgeSources: modelRequirements.knowledgeSources,
20371
+ tools: modelRequirements.tools ? [...modelRequirements.tools] : undefined,
19779
20372
  /*
19780
20373
  !!!
19781
20374
  metadata: {
@@ -19788,32 +20381,28 @@ class AgentLlmExecutionTools {
19788
20381
  requirementsHash,
19789
20382
  });
19790
20383
  }
19791
- underlyingLlmResult = await assistant.callChatModelStream(chatPrompt, onProgress);
20384
+ // Create modified chat prompt with agent system message specific to OpenAI Assistant
20385
+ const promptWithAgentModelRequirementsForOpenAiAssistantExecutionTools = {
20386
+ ...promptWithAgentModelRequirements,
20387
+ modelRequirements: {
20388
+ ...promptWithAgentModelRequirements.modelRequirements,
20389
+ modelName: undefined,
20390
+ systemMessage: undefined,
20391
+ temperature: undefined, // <- Note: Let the Assistant use its default temperature
20392
+ },
20393
+ };
20394
+ console.log('!!!! promptWithAgentModelRequirementsForOpenAiAssistantExecutionTools:', promptWithAgentModelRequirementsForOpenAiAssistantExecutionTools);
20395
+ underlyingLlmResult = await assistant.callChatModelStream(promptWithAgentModelRequirementsForOpenAiAssistantExecutionTools, onProgress);
19792
20396
  }
19793
20397
  else {
19794
20398
  if (this.options.isVerbose) {
19795
20399
  console.log(`2️⃣ Creating Assistant ${this.title} on generic LLM execution tools...`);
19796
20400
  }
19797
- // Create modified chat prompt with agent system message
19798
- const modifiedChatPrompt = {
19799
- ...chatPrompt,
19800
- modelRequirements: {
19801
- ...chatPrompt.modelRequirements,
19802
- ...modelRequirements,
19803
- // Spread tools to convert readonly array to mutable
19804
- tools: modelRequirements.tools ? [...modelRequirements.tools] : chatPrompt.modelRequirements.tools,
19805
- // Prepend agent system message to existing system message
19806
- systemMessage: modelRequirements.systemMessage +
19807
- (chatPrompt.modelRequirements.systemMessage
19808
- ? `\n\n${chatPrompt.modelRequirements.systemMessage}`
19809
- : ''),
19810
- },
19811
- };
19812
20401
  if (this.options.llmTools.callChatModelStream) {
19813
- underlyingLlmResult = await this.options.llmTools.callChatModelStream(modifiedChatPrompt, onProgress);
20402
+ underlyingLlmResult = await this.options.llmTools.callChatModelStream(promptWithAgentModelRequirements, onProgress);
19814
20403
  }
19815
20404
  else if (this.options.llmTools.callChatModel) {
19816
- underlyingLlmResult = await this.options.llmTools.callChatModel(modifiedChatPrompt);
20405
+ underlyingLlmResult = await this.options.llmTools.callChatModel(promptWithAgentModelRequirements);
19817
20406
  onProgress(underlyingLlmResult);
19818
20407
  }
19819
20408
  else {
@@ -19842,7 +20431,8 @@ AgentLlmExecutionTools.assistantCache = new Map();
19842
20431
  * TODO: [🧠] Adding parameter substitution support (here or should be responsibility of the underlying LLM Tools)
19843
20432
  */
19844
20433
 
19845
- var _Agent_instances, _Agent_selfLearn;
20434
+ var _Agent_instances, _Agent_selfLearnSamples;
20435
+ // !!!!! import { RemoteAgent } from './RemoteAgent'; // <- [💞] <- !!!!!
19846
20436
  /**
19847
20437
  * Represents one AI Agent
19848
20438
  *
@@ -19902,6 +20492,10 @@ class Agent extends AgentLlmExecutionTools {
19902
20492
  * This is parsed from commitments like USE BROWSER, USE SEARCH ENGINE, KNOWLEDGE, etc.
19903
20493
  */
19904
20494
  this.capabilities = [];
20495
+ /**
20496
+ * List of sample conversations (question/answer pairs)
20497
+ */
20498
+ this.samples = [];
19905
20499
  /**
19906
20500
  * Metadata like image or color
19907
20501
  */
@@ -19911,12 +20505,13 @@ class Agent extends AgentLlmExecutionTools {
19911
20505
  this.agentSource = agentSource;
19912
20506
  this.agentSource.subscribe((source) => {
19913
20507
  this.updateAgentSource(source);
19914
- const { agentName, personaDescription, initialMessage, links, meta, capabilities } = parseAgentSource(source);
20508
+ const { agentName, personaDescription, initialMessage, links, meta, capabilities, samples } = parseAgentSource(source);
19915
20509
  this._agentName = agentName;
19916
20510
  this.personaDescription = personaDescription;
19917
20511
  this.initialMessage = initialMessage;
19918
20512
  this.links = links;
19919
20513
  this.capabilities = capabilities;
20514
+ this.samples = samples;
19920
20515
  this.meta = { ...this.meta, ...meta };
19921
20516
  });
19922
20517
  }
@@ -19928,10 +20523,10 @@ class Agent extends AgentLlmExecutionTools {
19928
20523
  async callChatModelStream(prompt, onProgress) {
19929
20524
  var _a;
19930
20525
  // [1] Check if the user is asking the same thing as in the samples
19931
- const modelRequirements = await this.getAgentModelRequirements();
20526
+ const modelRequirements = await this.getModelRequirements();
19932
20527
  if (modelRequirements.samples) {
19933
20528
  const normalizedPrompt = normalizeMessageText(prompt.content);
19934
- const sample = modelRequirements.samples.find((sample) => normalizeMessageText(sample.question) === normalizedPrompt);
20529
+ const sample = modelRequirements.samples.find((sample) => sample.question !== null && normalizeMessageText(sample.question) === normalizedPrompt);
19935
20530
  if (sample) {
19936
20531
  const now = new Date().toISOString();
19937
20532
  const result = {
@@ -19977,17 +20572,22 @@ class Agent extends AgentLlmExecutionTools {
19977
20572
  if ((_a = modelRequirements.metadata) === null || _a === void 0 ? void 0 : _a.isClosed) {
19978
20573
  return result;
19979
20574
  }
19980
- await __classPrivateFieldGet(this, _Agent_instances, "m", _Agent_selfLearn).call(this, prompt, result);
19981
- // <- TODO: !!!! Do not await self-learn, run in background with error handling
20575
+ // TODO: !!!!! Is this timed properly?
20576
+ // Note: [1] Do the append of the samples
20577
+ __classPrivateFieldGet(this, _Agent_instances, "m", _Agent_selfLearnSamples).call(this, prompt, result);
20578
+ /*
20579
+ !!!!!
20580
+ // Note: [2] Asynchronously call the teacher agent and invoke the silver link. When the teacher fails, keep just the samples
20581
+ this.#selfLearnTeacher(prompt, result).catch((error) => {
20582
+ if (this.options.isVerbose) {
20583
+ console.error('Failed to self-learn from teacher agent', error);
20584
+ }
20585
+ });
20586
+ */
19982
20587
  return result;
19983
20588
  }
19984
20589
  }
19985
- _Agent_instances = new WeakSet(), _Agent_selfLearn =
19986
- /**
19987
- * Self-learning: Appends the conversation and extracted knowledge to the agent source
19988
- */
19989
- async function _Agent_selfLearn(prompt, result) {
19990
- // Learning: Append the conversation sample to the agent source
20590
+ _Agent_instances = new WeakSet(), _Agent_selfLearnSamples = function _Agent_selfLearnSamples(prompt, result) {
19991
20591
  const learningExample = spaceTrim$2((block) => `
19992
20592
 
19993
20593
  ---
@@ -19999,52 +20599,9 @@ async function _Agent_selfLearn(prompt, result) {
19999
20599
  ${block(result.content)}
20000
20600
 
20001
20601
  `);
20002
- // Extract knowledge
20003
- let knowledgeBlock = '';
20004
- try {
20005
- const extractionPrompt = {
20006
- title: 'Knowledge Extraction',
20007
- modelRequirements: {
20008
- modelVariant: 'CHAT',
20009
- },
20010
- content: spaceTrim$2((block) => `
20011
- You are an AI agent that is learning from a conversation.
20012
-
20013
- Here is the conversation so far:
20014
-
20015
- User: ${block(prompt.content)}
20016
- Agent: ${block(result.content)}
20017
-
20018
- Extract any new knowledge, facts, or important information that should be remembered for future interactions.
20019
- Format the output as a list of KNOWLEDGE blocks.
20020
- If there is no new knowledge, return nothing.
20021
-
20022
- Example output:
20023
- KNOWLEDGE The user's name is Alice.
20024
- KNOWLEDGE The project deadline is next Friday.
20025
- `),
20026
- pipelineUrl: 'https://github.com/webgptorg/promptbook/blob/main/prompts/knowledge-extraction.ptbk.md',
20027
- parameters: {},
20028
- };
20029
- if (this.options.llmTools.callChatModel) {
20030
- const extractionResult = await this.options.llmTools.callChatModel(extractionPrompt);
20031
- const extractedContent = extractionResult.content;
20032
- if (extractedContent.includes('KNOWLEDGE')) {
20033
- knowledgeBlock = '\n\n' + spaceTrim$2(extractedContent);
20034
- }
20035
- }
20036
- else {
20037
- // TODO: [🧠] Fallback to callChatModelStream if callChatModel is not available
20038
- }
20039
- }
20040
- catch (error) {
20041
- if (this.options.isVerbose) {
20042
- console.warn('Failed to extract knowledge', error);
20043
- }
20044
- }
20045
20602
  // Append to the current source
20046
20603
  const currentSource = this.agentSource.value;
20047
- const newSource = padBook(validateBook(spaceTrim$2(currentSource) + '\n\n' + learningExample + knowledgeBlock));
20604
+ const newSource = padBook(validateBook(spaceTrim$2(currentSource) + '\n\n' + learningExample));
20048
20605
  // Update the source (which will trigger the subscription and update the underlying tools)
20049
20606
  this.agentSource.next(newSource);
20050
20607
  };
@@ -21286,7 +21843,7 @@ const OpenAiSdkTranspiler = {
21286
21843
  {
21287
21844
  role: 'system',
21288
21845
  content: spaceTrim(\`
21289
- ${block(modelRequirements.systemMessage)}
21846
+ ${block(modelRequirements.systemMessage.split('`').join('\\`'))}
21290
21847
  \`),
21291
21848
  },
21292
21849
  ];
@@ -21372,7 +21929,7 @@ const OpenAiSdkTranspiler = {
21372
21929
  {
21373
21930
  role: 'system',
21374
21931
  content: spaceTrim(\`
21375
- ${block(modelRequirements.systemMessage)}
21932
+ ${block(modelRequirements.systemMessage.split('`').join('\\`'))}
21376
21933
  \`),
21377
21934
  },
21378
21935
  ];
@@ -21433,7 +21990,16 @@ const CORE_AGENTS_SERVER = {
21433
21990
  *
21434
21991
  * @public exported from `@promptbook/core`
21435
21992
  */
21436
- const CORE_AGENTS_SERVER_WELL_KNOWN_AGENT_NAMES = { ADAM: 'adam', TEACHER: 'teacher' };
21993
+ const CORE_AGENTS_SERVER_WELL_KNOWN_AGENT_NAMES = {
21994
+ /**
21995
+ * The default ancestor agent for new agents
21996
+ */
21997
+ ADAM: 'adam',
21998
+ /**
21999
+ * Agent that knows book syntax and can help with self-learning
22000
+ */
22001
+ TEACHER: 'teacher',
22002
+ };
21437
22003
  // <- TODO: [🆎] Allow to override (set) well-known agent names via Metadata
21438
22004
  /**
21439
22005
  * Available agents servers for the Promptbook
@@ -21542,29 +22108,82 @@ function $randomItem(...items) {
21542
22108
  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
21543
22109
  */
21544
22110
 
21545
- const PERSONALITIES = [
21546
- 'Friendly and helpful AI agent.',
21547
- 'Professional and efficient virtual assistant.',
21548
- 'Creative and imaginative digital companion.',
21549
- 'Knowledgeable and informative AI guide.',
21550
- 'Empathetic and understanding support bot.',
21551
- 'Energetic and enthusiastic conversational partner.',
21552
- 'Calm and patient virtual helper.',
21553
- 'Curious and inquisitive AI explorer.',
21554
- 'Witty and humorous digital friend.',
21555
- 'Serious and focused AI consultant.',
21556
- ];
22111
+ const PERSONALITIES = {
22112
+ ENGLISH: [
22113
+ 'Friendly and helpful AI agent.',
22114
+ 'Professional and efficient virtual assistant.',
22115
+ 'Creative and imaginative digital companion.',
22116
+ 'Knowledgeable and informative AI guide.',
22117
+ 'Empathetic and understanding support bot.',
22118
+ 'Energetic and enthusiastic conversational partner.',
22119
+ 'Calm and patient virtual helper.',
22120
+ 'Curious and inquisitive AI explorer.',
22121
+ 'Witty and humorous digital friend.',
22122
+ 'Serious and focused AI consultant.',
22123
+ ],
22124
+ CZECH: [
22125
+ // spell-checker:disable
22126
+ 'Přátelský a nápomocný AI agent.',
22127
+ 'Profesionální a efektivní virtuální asistent.',
22128
+ 'Kreativní a nápaditý digitální společník.',
22129
+ 'Zkušený a informativní AI průvodce.',
22130
+ 'Empatický a chápavý robot podpory.',
22131
+ 'Energický a nadšený partner pro konverzaci.',
22132
+ 'Klidný a trpělivý virtuální pomocník.',
22133
+ 'Zvědavý a hloubavý AI průzkumník.',
22134
+ 'Vtipný a humorný digitální přítel.',
22135
+ 'Vážný a soustředěný AI konzultant.',
22136
+ // spell-checker:enable
22137
+ ],
22138
+ };
21557
22139
  /**
21558
22140
  * Generates a random agent persona description.
21559
22141
  *
21560
22142
  * This function selects a random personality profile from a predefined pool
21561
22143
  * of common AI agent characteristics (e.g., friendly, professional, creative).
21562
22144
  *
22145
+ * @param language - The language code (e.g. 'ENGLISH', 'CZECH')
21563
22146
  * @returns A string describing the agent's persona
21564
22147
  * @private internal helper function
21565
22148
  */
21566
- function $randomAgentPersona() {
21567
- return $randomItem(...PERSONALITIES);
22149
+ function $randomAgentPersona(language = 'ENGLISH') {
22150
+ const normalizedLanguage = language.toUpperCase().trim();
22151
+ const personalities = PERSONALITIES[normalizedLanguage] || PERSONALITIES['ENGLISH'];
22152
+ return $randomItem(...personalities);
22153
+ }
22154
+ /**
22155
+ * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
22156
+ */
22157
+
22158
+ const RULES = {
22159
+ ENGLISH: [
22160
+ 'Always prioritize user privacy and data security.',
22161
+ 'Respond in a friendly and approachable manner.',
22162
+ 'Avoid using technical jargon unless necessary.',
22163
+ 'Maintain a neutral and unbiased tone in all responses.',
22164
+ ],
22165
+ CZECH: [
22166
+ // spell-checker:disable
22167
+ 'Vždy upřednostňujte soukromí uživatelů a bezpečnost dat.',
22168
+ 'Odpovídejte přátelským a přístupným způsobem.',
22169
+ 'Vyhněte se používání technického žargonu, pokud to není nutné.',
22170
+ 'Udržujte ve všech odpovědích neutrální a nezaujatý tón.',
22171
+ // spell-checker:enable
22172
+ ],
22173
+ };
22174
+ /**
22175
+ * Generates a random agent rule description.
22176
+ *
22177
+ * This function selects a random rule
22178
+ *
22179
+ * @param language - The language code (e.g. 'ENGLISH', 'CZECH')
22180
+ * @returns A string describing the agent's rule
22181
+ * @private internal helper function
22182
+ */
22183
+ function $randomAgentRule(language = 'ENGLISH') {
22184
+ const normalizedLanguage = language.toUpperCase().trim();
22185
+ const rules = RULES[normalizedLanguage] || RULES['ENGLISH'];
22186
+ return $randomItem(...rules);
21568
22187
  }
21569
22188
  /**
21570
22189
  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
@@ -21850,10 +22469,9 @@ function getNamePool(language) {
21850
22469
  * @public exported from `@promptbook/core`
21851
22470
  */
21852
22471
  function $generateBookBoilerplate(options) {
21853
- // eslint-disable-next-line prefer-const
21854
- let { agentName, parentAgentName = 'Adam', personaDescription, meta, namePool = 'ENGLISH' } = options || {};
21855
- // eslint-disable-next-line prefer-const
21856
- let { image, color, ...restMeta } = meta || {};
22472
+ const { parentAgentName = 'Adam', initialRules = [], meta, namePool = 'ENGLISH' } = options || {};
22473
+ let { agentName, personaDescription } = options || {};
22474
+ let { color } = meta || {};
21857
22475
  if (!agentName) {
21858
22476
  const namePoolInstance = getNamePool(namePool);
21859
22477
  const randomFullnameWithColor = namePoolInstance.generateName();
@@ -21861,20 +22479,25 @@ function $generateBookBoilerplate(options) {
21861
22479
  color = color || randomFullnameWithColor.color;
21862
22480
  }
21863
22481
  if (!personaDescription) {
21864
- personaDescription = $randomAgentPersona();
22482
+ personaDescription = $randomAgentPersona(namePool);
22483
+ }
22484
+ if (initialRules.length === 0) {
22485
+ initialRules.push($randomAgentRule(namePool));
21865
22486
  }
21866
22487
  const agentSource = validateBook(spaceTrim$2((block) => `
21867
22488
  ${agentName}
21868
22489
 
21869
22490
  META COLOR ${color || PROMPTBOOK_COLOR.toHex()}
21870
- META FONT Playfair Display, sans-serif
21871
22491
  PERSONA ${block(personaDescription)}
22492
+ ${block(initialRules.map((rule) => `RULE ${rule}`).join('\n'))}
21872
22493
  `));
22494
+ // Note: `META FONT Playfair Display, sans-serif` was removed for now
22495
+ // <- TODO: [🈲] Simple and object-constructive way how to create new books
21873
22496
  return agentSource;
21874
22497
  }
21875
22498
  /**
21876
22499
  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
21877
22500
  */
21878
22501
 
21879
- export { $bookTranspilersRegister, $generateBookBoilerplate, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentCollectionInSupabase, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CORE_AGENTS_SERVER, CORE_AGENTS_SERVER_WELL_KNOWN_AGENT_NAMES, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_RECURSION, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, DatabaseError, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotAllowed, NotFoundError, 
NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LEGAL_ENTITY, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, PUBLIC_AGENTS_SERVERS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, RESERVED_PARAMETER_NAMES, RemoteAgent, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeAgentHash, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements, createAgentModelRequirementsWithCommitments, createBasicAgentModelRequirements, createDefaultAgentName, createEmptyAgentModelRequirements, createLlmToolsFromConfiguration, createPipelineCollectionFromJson, createPipelineCollectionFromPromise, createPipelineCollectionFromUrl, createPipelineExecutor, createPipelineSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, generatePlaceholderAgentProfileImageUrl, getAllCommitmentDefinitions, 
getAllCommitmentTypes, getCommitmentDefinition, getGroupedCommitmentDefinitions, getPipelineInterface, getSingleLlmExecutionTools, identificationToPromptbookToken, isCommitmentSupported, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidBook, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, normalizeAgentName, padBook, parseAgentSource, parseParameters, parsePipeline, pipelineCollectionToJson, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validateBook, validatePipeline, validatePipelineString };
22502
+ export { $bookTranspilersRegister, $generateBookBoilerplate, $llmToolsMetadataRegister, $llmToolsRegister, $scrapersMetadataRegister, $scrapersRegister, ADMIN_EMAIL, ADMIN_GITHUB_NAME, API_REQUEST_TIMEOUT, AbstractFormatError, Agent, AgentCollectionInSupabase, AgentLlmExecutionTools, AuthenticationError, BIG_DATASET_TRESHOLD, BOOK_LANGUAGE_VERSION, BlackholeStorage, BoilerplateError, BoilerplateFormfactorDefinition, CLAIM, CLI_APP_ID, CORE_AGENTS_SERVER, CORE_AGENTS_SERVER_WELL_KNOWN_AGENT_NAMES, CallbackInterfaceTools, ChatbotFormfactorDefinition, CollectionError, CompletionFormfactorDefinition, CsvFormatError, CsvFormatParser, DEFAULT_AGENTS_DIRNAME, DEFAULT_BOOK, DEFAULT_BOOKS_DIRNAME, DEFAULT_BOOK_OUTPUT_PARAMETER_NAME, DEFAULT_BOOK_TITLE, DEFAULT_CSV_SETTINGS, DEFAULT_DOWNLOAD_CACHE_DIRNAME, DEFAULT_EXECUTION_CACHE_DIRNAME, DEFAULT_GET_PIPELINE_COLLECTION_FUNCTION_NAME, DEFAULT_INTERMEDIATE_FILES_STRATEGY, DEFAULT_IS_AUTO_INSTALLED, DEFAULT_IS_VERBOSE, DEFAULT_MAX_EXECUTION_ATTEMPTS, DEFAULT_MAX_FILE_SIZE, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, DEFAULT_MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, DEFAULT_MAX_PARALLEL_COUNT, DEFAULT_MAX_RECURSION, DEFAULT_MAX_REQUESTS_PER_MINUTE, DEFAULT_PIPELINE_COLLECTION_BASE_FILENAME, DEFAULT_PROMPT_TASK_TITLE, DEFAULT_REMOTE_SERVER_URL, DEFAULT_SCRAPE_CACHE_DIRNAME, DEFAULT_TASK_SIMULATED_DURATION_MS, DEFAULT_TASK_TITLE, DatabaseError, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, ExpectError, FAILED_VALUE_PLACEHOLDER, FORMFACTOR_DEFINITIONS, FormattedBookInMarkdownTranspiler, GENERIC_PIPELINE_INTERFACE, GeneratorFormfactorDefinition, GenericFormfactorDefinition, ImageGeneratorFormfactorDefinition, KnowledgeScrapeError, LimitReachedError, MANDATORY_CSV_SETTINGS, MAX_FILENAME_LENGTH, MODEL_ORDERS, MODEL_TRUST_LEVELS, MODEL_VARIANTS, MatcherFormfactorDefinition, MemoryStorage, MissingToolsError, MultipleLlmExecutionTools, NAME, NonTaskSectionTypes, NotAllowed, NotFoundError, 
NotYetImplementedCommitmentDefinition, NotYetImplementedError, ORDER_OF_PIPELINE_JSON, OpenAiSdkTranspiler, PADDING_LINES, PENDING_VALUE_PLACEHOLDER, PLAYGROUND_APP_ID, PROMPTBOOK_CHAT_COLOR, PROMPTBOOK_COLOR, PROMPTBOOK_ENGINE_VERSION, PROMPTBOOK_ERRORS, PROMPTBOOK_LEGAL_ENTITY, PROMPTBOOK_LOGO_URL, PROMPTBOOK_SYNTAX_COLORS, PUBLIC_AGENTS_SERVERS, ParseError, PipelineExecutionError, PipelineLogicError, PipelineUrlError, PrefixStorage, PromptbookFetchError, RESERVED_PARAMETER_NAMES, RemoteAgent, SET_IS_VERBOSE, SectionTypes, SheetsFormfactorDefinition, TaskTypes, TextFormatParser, TranslatorFormfactorDefinition, UNCERTAIN_USAGE, UNCERTAIN_ZERO_VALUE, USER_CHAT_COLOR, UnexpectedError, WrappedError, ZERO_USAGE, ZERO_VALUE, _AgentMetadata, _AgentRegistration, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _BoilerplateScraperMetadataRegistration, _DeepseekMetadataRegistration, _DocumentScraperMetadataRegistration, _GoogleMetadataRegistration, _LegacyDocumentScraperMetadataRegistration, _MarkdownScraperMetadataRegistration, _MarkitdownScraperMetadataRegistration, _OllamaMetadataRegistration, _OpenAiAssistantMetadataRegistration, _OpenAiCompatibleMetadataRegistration, _OpenAiMetadataRegistration, _PdfScraperMetadataRegistration, _WebsiteScraperMetadataRegistration, aboutPromptbookInformation, addUsage, book, cacheLlmTools, compilePipeline, computeAgentHash, computeCosineSimilarity, countUsage, createAgentLlmExecutionTools, createAgentModelRequirements, createAgentModelRequirementsWithCommitments, createBasicAgentModelRequirements, createDefaultAgentName, createEmptyAgentModelRequirements, createLlmToolsFromConfiguration, createPipelineCollectionFromJson, createPipelineCollectionFromPromise, createPipelineCollectionFromUrl, createPipelineExecutor, createPipelineSubcollection, embeddingVectorToString, executionReportJsonToString, extractParameterNamesFromTask, filterModels, generatePlaceholderAgentProfileImageUrl, getAllCommitmentDefinitions, 
getAllCommitmentTypes, getAllCommitmentsToolFunctions, getCommitmentDefinition, getGroupedCommitmentDefinitions, getPipelineInterface, getSingleLlmExecutionTools, identificationToPromptbookToken, isCommitmentSupported, isPassingExpectations, isPipelineImplementingInterface, isPipelineInterfacesEqual, isPipelinePrepared, isValidBook, isValidPipelineString, joinLlmExecutionTools, limitTotalUsage, makeKnowledgeSourceHandler, migratePipeline, normalizeAgentName, padBook, parseAgentSource, parseParameters, parsePipeline, pipelineCollectionToJson, pipelineJsonToString, prepareKnowledgePieces, preparePersona, preparePipeline, prettifyPipelineString, promptbookFetch, promptbookTokenToIdentification, unpreparePipeline, usageToHuman, usageToWorktime, validateBook, validatePipeline, validatePipelineString };
21880
22503
  //# sourceMappingURL=index.es.js.map