@promptbook/pdf 0.105.0-0 → 0.105.0-3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/esm/index.es.js +80 -63
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  4. package/esm/typings/src/_packages/types.index.d.ts +4 -0
  5. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +10 -3
  6. package/esm/typings/src/book-2.0/agent-source/AgentModelRequirements.d.ts +11 -1
  7. package/esm/typings/src/book-2.0/agent-source/communication-samples.test.d.ts +1 -0
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.blocks.test.d.ts +1 -0
  9. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.import.test.d.ts +1 -0
  10. package/esm/typings/src/book-2.0/agent-source/parseAgentSource.import.test.d.ts +1 -0
  11. package/esm/typings/src/book-2.0/agent-source/parseAgentSourceWithCommitments.blocks.test.d.ts +1 -0
  12. package/esm/typings/src/commitments/USE_TIME/USE_TIME.d.ts +40 -0
  13. package/esm/typings/src/commitments/USE_TIME/USE_TIME.test.d.ts +1 -0
  14. package/esm/typings/src/commitments/_base/BaseCommitmentDefinition.d.ts +8 -0
  15. package/esm/typings/src/commitments/_base/CommitmentDefinition.d.ts +8 -0
  16. package/esm/typings/src/commitments/index.d.ts +11 -2
  17. package/esm/typings/src/config.d.ts +1 -0
  18. package/esm/typings/src/import-plugins/$fileImportPlugins.d.ts +7 -0
  19. package/esm/typings/src/import-plugins/AgentFileImportPlugin.d.ts +7 -0
  20. package/esm/typings/src/import-plugins/FileImportPlugin.d.ts +24 -0
  21. package/esm/typings/src/import-plugins/JsonFileImportPlugin.d.ts +7 -0
  22. package/esm/typings/src/import-plugins/TextFileImportPlugin.d.ts +7 -0
  23. package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +2 -1
  24. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/countUsage.d.ts +2 -2
  25. package/esm/typings/src/llm-providers/agent/Agent.d.ts +9 -2
  26. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +3 -1
  27. package/esm/typings/src/llm-providers/openai/OpenAiAssistantExecutionTools.d.ts +10 -0
  28. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  29. package/esm/typings/src/scripting/javascript/JavascriptExecutionToolsOptions.d.ts +6 -1
  30. package/esm/typings/src/types/ModelRequirements.d.ts +6 -12
  31. package/esm/typings/src/utils/execCommand/$execCommandNormalizeOptions.d.ts +2 -3
  32. package/esm/typings/src/utils/execCommand/ExecCommandOptions.d.ts +7 -1
  33. package/esm/typings/src/utils/organization/keepImported.d.ts +9 -0
  34. package/esm/typings/src/utils/organization/keepTypeImported.d.ts +0 -1
  35. package/esm/typings/src/utils/random/$generateBookBoilerplate.d.ts +4 -0
  36. package/esm/typings/src/utils/random/$randomAgentPersona.d.ts +2 -1
  37. package/esm/typings/src/utils/random/$randomAgentRule.d.ts +14 -0
  38. package/esm/typings/src/version.d.ts +1 -1
  39. package/package.json +2 -2
  40. package/umd/index.umd.js +80 -63
  41. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -24,7 +24,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
24
24
  * @generated
25
25
  * @see https://github.com/webgptorg/promptbook
26
26
  */
27
- const PROMPTBOOK_ENGINE_VERSION = '0.105.0-0';
27
+ const PROMPTBOOK_ENGINE_VERSION = '0.105.0-3';
28
28
  /**
29
29
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
30
30
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -999,6 +999,7 @@ const PROMPTBOOK_COLOR = Color.fromString('promptbook');
999
999
  SEPARATOR: Color.fromHex('#cccccc'),
1000
1000
  COMMITMENT: Color.fromHex('#DA0F78'),
1001
1001
  PARAMETER: Color.fromHex('#8e44ad'),
1002
+ CODE_BLOCK: Color.fromHex('#7700ffff'),
1002
1003
  });
1003
1004
  // <- TODO: [🧠][🈵] Using `Color` here increases the package size approx 3kb, maybe remove it
1004
1005
  /**
@@ -3833,74 +3834,90 @@ function addUsage(...usageItems) {
3833
3834
  * in real-time through an observable.
3834
3835
  *
3835
3836
  * @param llmTools - The LLM tools to be intercepted and tracked
3836
- * @returns An augmented version of the tools that includes usage tracking capabilities
3837
+ * @returns Full proxy of the tools with added usage tracking capabilities
3837
3838
  * @public exported from `@promptbook/core`
3838
3839
  */
3839
3840
  function countUsage(llmTools) {
3840
3841
  let totalUsage = ZERO_USAGE;
3841
3842
  const spending = new Subject();
3842
- const proxyTools = {
3843
- get title() {
3844
- return `${llmTools.title} (+usage)`;
3845
- // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
3846
- // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
3847
- },
3848
- get description() {
3849
- return `${llmTools.description} (+usage)`;
3850
- // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
3851
- // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
3852
- },
3853
- checkConfiguration() {
3854
- return /* not await */ llmTools.checkConfiguration();
3855
- },
3856
- listModels() {
3857
- return /* not await */ llmTools.listModels();
3858
- },
3859
- spending() {
3860
- return spending.asObservable();
3861
- },
3862
- getTotalUsage() {
3863
- // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
3864
- return totalUsage;
3843
+ // Create a Proxy to intercept all property access and ensure full proxying of all properties
3844
+ const proxyTools = new Proxy(llmTools, {
3845
+ get(target, prop, receiver) {
3846
+ // Handle title property
3847
+ if (prop === 'title') {
3848
+ return `${target.title} (+usage)`;
3849
+ // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
3850
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
3851
+ }
3852
+ // Handle description property
3853
+ if (prop === 'description') {
3854
+ return `${target.description} (+usage)`;
3855
+ // <- TODO: [🧈] Maybe standartize the suffix when wrapping `LlmExecutionTools` up
3856
+ // <- TODO: [🧈][🧠] Does it make sense to suffix "(+usage)"?
3857
+ }
3858
+ // Handle spending method (new method added by this wrapper)
3859
+ if (prop === 'spending') {
3860
+ return () => {
3861
+ return spending.asObservable();
3862
+ };
3863
+ }
3864
+ // Handle getTotalUsage method (new method added by this wrapper)
3865
+ if (prop === 'getTotalUsage') {
3866
+ // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
3867
+ return () => {
3868
+ return totalUsage;
3869
+ };
3870
+ }
3871
+ // Handle callChatModel method with usage counting
3872
+ if (prop === 'callChatModel' && target.callChatModel !== undefined) {
3873
+ return async (prompt) => {
3874
+ // console.info('[🚕] callChatModel through countTotalUsage');
3875
+ const promptResult = await target.callChatModel(prompt);
3876
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3877
+ spending.next(promptResult.usage);
3878
+ return promptResult;
3879
+ };
3880
+ }
3881
+ // Handle callCompletionModel method with usage counting
3882
+ if (prop === 'callCompletionModel' && target.callCompletionModel !== undefined) {
3883
+ return async (prompt) => {
3884
+ // console.info('[🚕] callCompletionModel through countTotalUsage');
3885
+ const promptResult = await target.callCompletionModel(prompt);
3886
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3887
+ spending.next(promptResult.usage);
3888
+ return promptResult;
3889
+ };
3890
+ }
3891
+ // Handle callEmbeddingModel method with usage counting
3892
+ if (prop === 'callEmbeddingModel' && target.callEmbeddingModel !== undefined) {
3893
+ return async (prompt) => {
3894
+ // console.info('[🚕] callEmbeddingModel through countTotalUsage');
3895
+ const promptResult = await target.callEmbeddingModel(prompt);
3896
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3897
+ spending.next(promptResult.usage);
3898
+ return promptResult;
3899
+ };
3900
+ }
3901
+ // Handle callImageGenerationModel method with usage counting
3902
+ if (prop === 'callImageGenerationModel' && target.callImageGenerationModel !== undefined) {
3903
+ return async (prompt) => {
3904
+ // console.info('[🚕] callImageGenerationModel through countTotalUsage');
3905
+ const promptResult = await target.callImageGenerationModel(prompt);
3906
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3907
+ spending.next(promptResult.usage);
3908
+ return promptResult;
3909
+ };
3910
+ }
3911
+ // <- Note: [🤖]
3912
+ // For all other properties and methods, delegate to the original target
3913
+ const value = Reflect.get(target, prop, receiver);
3914
+ // If it's a function, bind it to the target to preserve context
3915
+ if (typeof value === 'function') {
3916
+ return value.bind(target);
3917
+ }
3918
+ return value;
3865
3919
  },
3866
- };
3867
- if (llmTools.callChatModel !== undefined) {
3868
- proxyTools.callChatModel = async (prompt) => {
3869
- // console.info('[🚕] callChatModel through countTotalUsage');
3870
- const promptResult = await llmTools.callChatModel(prompt);
3871
- totalUsage = addUsage(totalUsage, promptResult.usage);
3872
- spending.next(promptResult.usage);
3873
- return promptResult;
3874
- };
3875
- }
3876
- if (llmTools.callCompletionModel !== undefined) {
3877
- proxyTools.callCompletionModel = async (prompt) => {
3878
- // console.info('[🚕] callCompletionModel through countTotalUsage');
3879
- const promptResult = await llmTools.callCompletionModel(prompt);
3880
- totalUsage = addUsage(totalUsage, promptResult.usage);
3881
- spending.next(promptResult.usage);
3882
- return promptResult;
3883
- };
3884
- }
3885
- if (llmTools.callEmbeddingModel !== undefined) {
3886
- proxyTools.callEmbeddingModel = async (prompt) => {
3887
- // console.info('[🚕] callEmbeddingModel through countTotalUsage');
3888
- const promptResult = await llmTools.callEmbeddingModel(prompt);
3889
- totalUsage = addUsage(totalUsage, promptResult.usage);
3890
- spending.next(promptResult.usage);
3891
- return promptResult;
3892
- };
3893
- }
3894
- if (llmTools.callImageGenerationModel !== undefined) {
3895
- proxyTools.callImageGenerationModel = async (prompt) => {
3896
- // console.info('[🚕] callImageGenerationModel through countTotalUsage');
3897
- const promptResult = await llmTools.callImageGenerationModel(prompt);
3898
- totalUsage = addUsage(totalUsage, promptResult.usage);
3899
- spending.next(promptResult.usage);
3900
- return promptResult;
3901
- };
3902
- }
3903
- // <- Note: [🤖]
3920
+ });
3904
3921
  return proxyTools;
3905
3922
  }
3906
3923
  /**