@promptbook/node 0.61.0 → 0.62.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/esm/index.es.js +123 -17
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/promptbook-collection/index.d.ts +3 -3
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -1
  5. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
  6. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -0
  7. package/esm/typings/src/execution/utils/usageToHuman.d.ts +15 -0
  8. package/esm/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
  9. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +4 -0
  10. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +12 -2
  11. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +12 -2
  12. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +7 -1
  13. package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
  14. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +15 -0
  15. package/{umd/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → esm/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
  16. package/esm/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
  17. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  19. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  20. package/esm/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
  21. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  22. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  23. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  24. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  25. package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
  26. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  27. package/package.json +3 -3
  28. package/umd/index.umd.js +145 -21
  29. package/umd/index.umd.js.map +1 -1
  30. package/umd/typings/promptbook-collection/index.d.ts +3 -3
  31. package/umd/typings/src/_packages/core.index.d.ts +2 -1
  32. package/umd/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
  33. package/umd/typings/src/execution/createPipelineExecutor.d.ts +1 -0
  34. package/umd/typings/src/execution/utils/usageToHuman.d.ts +15 -0
  35. package/umd/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
  36. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +4 -0
  37. package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +12 -2
  38. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +12 -2
  39. package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +7 -1
  40. package/umd/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
  41. package/umd/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +15 -0
  42. package/{esm/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → umd/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
  43. package/umd/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
  44. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  45. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  46. package/umd/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  47. package/umd/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
  48. package/umd/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  49. package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  50. package/umd/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  51. package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  52. package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
  53. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  54. package/esm/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +0 -11
  55. package/umd/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +0 -11
package/esm/index.es.js CHANGED
@@ -4,6 +4,7 @@ import { join } from 'path';
  import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
  import { format } from 'prettier';
  import parserHtml from 'prettier/parser-html';
+ import * as dotenv from 'dotenv';
  import Anthropic from '@anthropic-ai/sdk';
  import OpenAI from 'openai';
 
@@ -459,7 +460,8 @@ function promptTemplateParameterJsonToString(promptTemplateParameterJson) {
  /**
  * TODO: !!!! Implement new features and commands into `promptTemplateParameterJsonToString`
  * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
- * TODO: Escape all
+ * TODO: [🏛] Maybe make some markdown builder
+ * TODO: [🏛] Escape all
  */

  /**
@@ -654,7 +656,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, 
write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title 
for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
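
Note: the only substantive changes in the large `PipelineCollection` constant above are the version bump from 0.61.0-30 to 0.62.0-0 and the rename of the `modelUsage` key to `usage` in each preparation record. A minimal sketch of the resulting preparation shape (the interface and field types below are inferred from the diff, not taken from the package's actual `PreparationJson` typing):

    // Hypothetical sketch; `TokenUsage` stands in for whatever the real usage type is named.
    interface PreparationJson {
        id: number; // sequential preparation id
        promptbookVersion: string; // e.g. '0.62.0-0'
        usage: TokenUsage; // <- renamed from `modelUsage` in 0.61.0
    }
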
@@ -2033,6 +2035,9 @@ function joinLlmExecutionTools() {
  }
  return new (MultipleLlmExecutionTools.bind.apply(MultipleLlmExecutionTools, __spreadArray([void 0], __read(llmExecutionTools), false)))();
  }
+ /**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */

  /**
  * Determine if the pipeline is fully prepared
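
The compiled `joinLlmExecutionTools` above spreads its variadic arguments into a single `MultipleLlmExecutionTools`. A hedged sketch of the call shape (the combining behavior beyond construction is an assumption, not confirmed by this diff):

    // Combine several providers behind one LlmExecutionTools interface.
    const llmTools = joinLlmExecutionTools(openAiTools, anthropicTools);
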
@@ -2268,7 +2273,7 @@ function union() {
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0-30';
+ var PROMPTBOOK_VERSION = '0.62.0-0';
  // TODO: !!!! List here all the versions and annotate + put into script

  /**
@@ -3161,6 +3166,7 @@ function createPipelineExecutor(options) {
  }
  /**
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
+ * TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipeline to report
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probably move expectations from templates to parameters
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
@@ -3369,6 +3375,86 @@ TODO: [🧊] This is how it can look in future
  * [ ] One piece can have multiple sources
  */

+ /**
+ * Intercepts LLM tools and counts total usage of the tools
+ *
+ * @param llmTools LLM tools to be intercepted with usage counting
+ * @returns LLM tools with same functionality with added total usage counting
+ */
+ function countTotalUsage(llmTools) {
+ var _this = this;
+ var totalUsage = ZERO_USAGE;
+ var proxyTools = {
+ get title() {
+ // TODO: [🧠] Maybe put here some suffix
+ return llmTools.title;
+ },
+ get description() {
+ // TODO: [🧠] Maybe put here some suffix
+ return llmTools.description;
+ },
+ listModels: function () {
+ return /* not await */ llmTools.listModels();
+ },
+ getTotalUsage: function () {
+ // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
+ return totalUsage;
+ },
+ };
+ if (llmTools.callChatModel !== undefined) {
+ proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callChatModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ if (llmTools.callCompletionModel !== undefined) {
+ proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ if (llmTools.callEmbeddingModel !== undefined) {
+ proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ // <- Note: [🤖]
+ return proxyTools;
+ }
+ /**
+ * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
+ * TODO: [🧠] Is there some meaningful way how to test this util
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
+ * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
+
  /**
  * Prepares the persona for the pipeline
  *
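
The new `countTotalUsage` interceptor wraps whichever of `callChatModel`, `callCompletionModel` and `callEmbeddingModel` the inner tools expose, and folds every `promptResult.usage` into a running total via `addUsage`. A usage sketch (note the TODO above: the function is not yet exported from a public entry point, so the call below is illustrative only):

    const llmToolsWithUsage = countTotalUsage(llmTools); // starts from ZERO_USAGE
    const result = await llmToolsWithUsage.callChatModel(prompt); // proxied; usage is accumulated
    console.log(llmToolsWithUsage.getTotalUsage()); // sum of usage across all calls so far
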
@@ -3492,23 +3578,24 @@ function prepareTemplates(pipeline, options) {
  */
  function preparePipeline(pipeline, options) {
  return __awaiter(this, void 0, void 0, function () {
- var _a, maxParallelCount, parameters, promptTemplates,
+ var llmTools, _a, maxParallelCount, _b, isVerbose, parameters, promptTemplates,
  /*
  <- TODO: [🧠][0] `promptbookVersion` */
  knowledgeSources /*
  <- TODO: [🧊] `knowledgePieces` */, personas /*
- <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
+ <- TODO: [🧊] `preparations` */, llmToolsWithUsage, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
  var _this = this;
- return __generator(this, function (_b) {
- switch (_b.label) {
+ return __generator(this, function (_c) {
+ switch (_c.label) {
  case 0:
- _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
+ llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
  parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+ llmToolsWithUsage = countTotalUsage(llmTools);
  currentPreparation = {
  id: 1,
  // TODO: [🍥]> date: $currentDate(),
  promptbookVersion: PROMPTBOOK_VERSION,
- modelUsage: ZERO_USAGE,
+ usage: ZERO_USAGE,
  };
  preparations = [
  // ...preparations
@@ -3520,7 +3607,11 @@ function preparePipeline(pipeline, options) {
  var modelRequirements, preparedPersona;
  return __generator(this, function (_a) {
  switch (_a.label) {
- case 0: return [4 /*yield*/, preparePersona(persona.description, options)];
+ case 0: return [4 /*yield*/, preparePersona(persona.description, {
+ llmTools: llmToolsWithUsage,
+ maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
+ isVerbose: isVerbose,
+ })];
  case 1:
  modelRequirements = _a.sent();
  preparedPersona = __assign(__assign({}, persona), { modelRequirements: modelRequirements, preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] });
@@ -3530,20 +3621,30 @@ function preparePipeline(pipeline, options) {
  });
  }); })];
  case 1:
- _b.sent();
+ _c.sent();
  knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
- return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, options)];
+ return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, {
+ llmTools: llmToolsWithUsage,
+ maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
+ isVerbose: isVerbose,
+ })];
  case 2:
- partialknowledgePiecesPrepared = _b.sent();
+ partialknowledgePiecesPrepared = _c.sent();
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  return [4 /*yield*/, prepareTemplates({
  parameters: parameters,
  promptTemplates: promptTemplates,
  knowledgePiecesCount: knowledgePiecesPrepared.length,
- }, options)];
+ }, {
+ llmTools: llmToolsWithUsage,
+ maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
+ isVerbose: isVerbose,
+ })];
  case 3:
- promptTemplatesPrepared = (_b.sent()).promptTemplatesPrepared;
+ promptTemplatesPrepared = (_c.sent()).promptTemplatesPrepared;
  // ----- /Templates preparation -----
+ // Note: Count total usage
+ currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
  return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
  }
  });
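
De-transpiled, the new flow in `preparePipeline` reads roughly as below (a reconstruction from the compiled output above, not the package's actual TypeScript source):

    const { llmTools, maxParallelCount = MAX_PARALLEL_COUNT, isVerbose = false } = options;
    const llmToolsWithUsage = countTotalUsage(llmTools);
    // preparePersona, prepareKnowledgePieces and prepareTemplates now all receive
    // { llmTools: llmToolsWithUsage, maxParallelCount, isVerbose } instead of the raw options,
    // so their combined usage can be recorded on the preparation afterwards:
    currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
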
@@ -3554,7 +3655,6 @@ function preparePipeline(pipeline, options) {
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
- * TODO: [🎐] !!!!! Use here countTotalUsage
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */
 
@@ -5927,7 +6027,7 @@ function computeUsageCounts(content) {
  * @private utility for initializing UncertainNumber
  */
  function uncertainNumber(value) {
- if (value === null || value === undefined || Number.isNaN(NaN)) {
+ if (value === null || value === undefined || Number.isNaN(value)) {
  return { value: 0, isUncertain: true };
  }
  return { value: value };
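
The one-character fix above matters: `Number.isNaN(NaN)` is always true, so the old guard fired unconditionally and every input was flattened to an uncertain zero. With `Number.isNaN(value)` the helper behaves as intended:

    uncertainNumber(42); // { value: 42 }
    uncertainNumber(undefined); // { value: 0, isUncertain: true }
    uncertainNumber(NaN); // { value: 0, isUncertain: true }
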
@@ -6241,6 +6341,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🍜] Auto use anonymous server in browser
  */

  /**
@@ -6934,6 +7035,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  *
  * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
  *
+ * @@@ .env
+ *
  * It looks for environment variables:
  * - `process.env.OPENAI_API_KEY`
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -6946,6 +7049,7 @@ function createLlmToolsFromEnv(options) {
  throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
  }
  var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+ dotenv.config(); // <- TODO: !!!!!! Double check [🟢]
  var llmTools = [];
  if (typeof process.env.OPENAI_API_KEY === 'string') {
  llmTools.push(new OpenAiExecutionTools({
@@ -6970,6 +7074,7 @@ function createLlmToolsFromEnv(options) {
  }
  }
  /**
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
  * TODO: [🔼] !!! Export via `@promptbook/node`
  * TODO: @@@ write discussion about this - wizard
  * TODO: Add Azure
@@ -6977,6 +7082,7 @@ function createLlmToolsFromEnv(options) {
  * TODO: [🧠] Is there some meaningful way how to test this util
  * TODO: [🧠] Maybe pass env as argument
  * Note: [🟢] This code should never be published outside of `@promptbook/node`
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */

  export { PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromEnv };
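
Together with the `dotenv.config()` call added above, `createLlmToolsFromEnv` now loads a local `.env` file by itself, so API keys no longer have to be exported into the shell. A minimal Node.js sketch (the `isVerbose` option is the only one visible in this diff; whether the returned tools are pre-joined across providers is an assumption):

    // .env: OPENAI_API_KEY=... and/or ANTHROPIC_CLAUDE_API_KEY=...
    import { createLlmToolsFromEnv } from '@promptbook/node';

    const llmTools = createLlmToolsFromEnv({ isVerbose: true });
    const models = await llmTools.listModels(); // models from every provider configured in .env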