@promptbook/cli 0.61.0 → 0.62.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. package/esm/index.es.js +196 -46
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +2 -1
  4. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
  5. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -0
  6. package/esm/typings/src/execution/utils/usageToHuman.d.ts +15 -0
  7. package/esm/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
  8. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +1 -0
  9. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +3 -2
  10. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
  11. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +1 -1
  12. package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
  13. package/esm/typings/src/llm-providers/_common/utils/{count-total-cost/LlmExecutionToolsWithTotalCost.d.ts → count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts} +5 -2
  14. package/{umd/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → esm/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
  15. package/esm/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
  16. package/esm/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
  17. package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
  18. package/package.json +4 -4
  19. package/umd/index.umd.js +196 -46
  20. package/umd/index.umd.js.map +1 -1
  21. package/umd/typings/src/_packages/core.index.d.ts +2 -1
  22. package/umd/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
  23. package/umd/typings/src/execution/createPipelineExecutor.d.ts +1 -0
  24. package/umd/typings/src/execution/utils/usageToHuman.d.ts +15 -0
  25. package/umd/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
  26. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +1 -0
  27. package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +3 -2
  28. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
  29. package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +1 -1
  30. package/umd/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
  31. package/umd/typings/src/llm-providers/_common/utils/{count-total-cost/LlmExecutionToolsWithTotalCost.d.ts → count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts} +5 -2
  32. package/{esm/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → umd/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
  33. package/umd/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
  34. package/umd/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
  35. package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
package/esm/index.es.js CHANGED
@@ -150,7 +150,7 @@ new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined'
150
150
  /**
151
151
  * The version of the Promptbook library
152
152
  */
153
- var PROMPTBOOK_VERSION = '0.61.0-30';
153
+ var PROMPTBOOK_VERSION = '0.61.0';
154
154
  // TODO: !!!! List here all the versions and annotate + put into script
155
155
 
156
156
  /**
@@ -552,7 +552,8 @@ function promptTemplateParameterJsonToString(promptTemplateParameterJson) {
552
552
  /**
553
553
  * TODO: !!!! Implement new features and commands into `promptTemplateParameterJsonToString`
554
554
  * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
555
- * TODO: Escape all
555
+ * TODO: [🏛] Maybe make some markdown builder
556
+ * TODO: [🏛] Escape all
556
557
  */
557
558
 
558
559
  /**
@@ -747,7 +748,7 @@ function forEachAsync(array, options, callbackfunction) {
747
748
  });
748
749
  }
749
750
 
750
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
751
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by 
comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
751
752
 
752
753
  /**
753
754
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2126,6 +2127,9 @@ function joinLlmExecutionTools() {
2126
2127
  }
2127
2128
  return new (MultipleLlmExecutionTools.bind.apply(MultipleLlmExecutionTools, __spreadArray([void 0], __read(llmExecutionTools), false)))();
2128
2129
  }
2130
+ /**
2131
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
2132
+ */
2129
2133
 
2130
2134
  /**
2131
2135
  * Determine if the pipeline is fully prepared
@@ -3248,6 +3252,7 @@ function createPipelineExecutor(options) {
3248
3252
  }
3249
3253
  /**
3250
3254
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
3255
+ * TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipeline to report
3251
3256
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
3252
3257
  * TODO: [♈] Probably move expectations from templates to parameters
3253
3258
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
@@ -3456,6 +3461,85 @@ TODO: [🧊] This is how it can look in future
3456
3461
  * [ ] One piece can have multiple sources
3457
3462
  */
3458
3463
 
3464
+ /**
3465
+ * Intercepts LLM tools and counts total usage of the tools
3466
+ *
3467
+ * @param llmTools LLM tools to be intercepted with usage counting
3468
+ * @returns LLM tools with same functionality with added total cost counting
3469
+ */
3470
+ function countTotalUsage(llmTools) {
3471
+ var _this = this;
3472
+ var totalUsage = ZERO_USAGE;
3473
+ var proxyTools = {
3474
+ get title() {
3475
+ // TODO: [🧠] Maybe put here some suffix
3476
+ return llmTools.title;
3477
+ },
3478
+ get description() {
3479
+ // TODO: [🧠] Maybe put here some suffix
3480
+ return llmTools.description;
3481
+ },
3482
+ listModels: function () {
3483
+ return /* not await */ llmTools.listModels();
3484
+ },
3485
+ get totalUsage() {
3486
+ return totalUsage;
3487
+ },
3488
+ };
3489
+ if (llmTools.callChatModel !== undefined) {
3490
+ proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
3491
+ var promptResult;
3492
+ return __generator(this, function (_a) {
3493
+ switch (_a.label) {
3494
+ case 0: return [4 /*yield*/, llmTools.callChatModel(prompt)];
3495
+ case 1:
3496
+ promptResult = _a.sent();
3497
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3498
+ return [2 /*return*/, promptResult];
3499
+ }
3500
+ });
3501
+ }); };
3502
+ }
3503
+ if (llmTools.callCompletionModel !== undefined) {
3504
+ proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
3505
+ var promptResult;
3506
+ return __generator(this, function (_a) {
3507
+ switch (_a.label) {
3508
+ case 0: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
3509
+ case 1:
3510
+ promptResult = _a.sent();
3511
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3512
+ return [2 /*return*/, promptResult];
3513
+ }
3514
+ });
3515
+ }); };
3516
+ }
3517
+ if (llmTools.callEmbeddingModel !== undefined) {
3518
+ proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
3519
+ var promptResult;
3520
+ return __generator(this, function (_a) {
3521
+ switch (_a.label) {
3522
+ case 0: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
3523
+ case 1:
3524
+ promptResult = _a.sent();
3525
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3526
+ return [2 /*return*/, promptResult];
3527
+ }
3528
+ });
3529
+ }); };
3530
+ }
3531
+ // <- Note: [🤖]
3532
+ return proxyTools;
3533
+ }
3534
+ /**
3535
+ * TODO: [🔼] !!! Export via `@promptbookcore/`
3536
+ * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
3537
+ * TODO: [🧠] Is there some meaningful way to test this util
3538
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
3539
+ * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
3540
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
3541
+ */
3542
+
3459
3543
  /**
3460
3544
  * Prepares the persona for the pipeline
3461
3545
  *
@@ -3579,18 +3663,19 @@ function prepareTemplates(pipeline, options) {
3579
3663
  */
3580
3664
  function preparePipeline(pipeline, options) {
3581
3665
  return __awaiter(this, void 0, void 0, function () {
3582
- var _a, maxParallelCount, parameters, promptTemplates,
3666
+ var llmTools, _a, maxParallelCount, _b, isVerbose, parameters, promptTemplates,
3583
3667
  /*
3584
3668
  <- TODO: [🧠][0] `promptbookVersion` */
3585
3669
  knowledgeSources /*
3586
3670
  <- TODO: [🧊] `knowledgePieces` */, personas /*
3587
- <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
3671
+ <- TODO: [🧊] `preparations` */, llmToolsWithUsage, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
3588
3672
  var _this = this;
3589
- return __generator(this, function (_b) {
3590
- switch (_b.label) {
3673
+ return __generator(this, function (_c) {
3674
+ switch (_c.label) {
3591
3675
  case 0:
3592
- _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
3676
+ llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
3593
3677
  parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
3678
+ llmToolsWithUsage = countTotalUsage(llmTools);
3594
3679
  currentPreparation = {
3595
3680
  id: 1,
3596
3681
  // TODO: [🍥]> date: $currentDate(),
@@ -3607,7 +3692,11 @@ function preparePipeline(pipeline, options) {
3607
3692
  var modelRequirements, preparedPersona;
3608
3693
  return __generator(this, function (_a) {
3609
3694
  switch (_a.label) {
3610
- case 0: return [4 /*yield*/, preparePersona(persona.description, options)];
3695
+ case 0: return [4 /*yield*/, preparePersona(persona.description, {
3696
+ llmTools: llmToolsWithUsage,
3697
+ maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
3698
+ isVerbose: isVerbose,
3699
+ })];
3611
3700
  case 1:
3612
3701
  modelRequirements = _a.sent();
3613
3702
  preparedPersona = __assign(__assign({}, persona), { modelRequirements: modelRequirements, preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] });
@@ -3617,20 +3706,30 @@ function preparePipeline(pipeline, options) {
3617
3706
  });
3618
3707
  }); })];
3619
3708
  case 1:
3620
- _b.sent();
3709
+ _c.sent();
3621
3710
  knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
3622
- return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, options)];
3711
+ return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, {
3712
+ llmTools: llmToolsWithUsage,
3713
+ maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
3714
+ isVerbose: isVerbose,
3715
+ })];
3623
3716
  case 2:
3624
- partialknowledgePiecesPrepared = _b.sent();
3717
+ partialknowledgePiecesPrepared = _c.sent();
3625
3718
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
3626
3719
  return [4 /*yield*/, prepareTemplates({
3627
3720
  parameters: parameters,
3628
3721
  promptTemplates: promptTemplates,
3629
3722
  knowledgePiecesCount: knowledgePiecesPrepared.length,
3630
- }, options)];
3723
+ }, {
3724
+ llmTools: llmToolsWithUsage,
3725
+ maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
3726
+ isVerbose: isVerbose,
3727
+ })];
3631
3728
  case 3:
3632
- promptTemplatesPrepared = (_b.sent()).promptTemplatesPrepared;
3729
+ promptTemplatesPrepared = (_c.sent()).promptTemplatesPrepared;
3633
3730
  // ----- /Templates preparation -----
3731
+ // Note: Count total usage
3732
+ currentPreparation.modelUsage = llmToolsWithUsage.totalUsage;
3634
3733
  return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
3635
3734
  }
3636
3735
  });
@@ -3641,7 +3740,6 @@ function preparePipeline(pipeline, options) {
3641
3740
  * TODO: Write tests for `preparePipeline`
3642
3741
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
3643
3742
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
3644
- * TODO: [🎐] !!!!! Use here countTotalUsage
3645
3743
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
3646
3744
  */
3647
3745
 
@@ -5957,6 +6055,69 @@ function listAllFiles(path, isRecursive) {
5957
6055
  * Note: [🟢] This code should never be published outside of `@pipeline/node`
5958
6056
  */
5959
6057
 
6058
+ /**
6059
+ * Stringify the PipelineJson with proper formatting
6060
+ *
6061
+ * Note: [0] It can be used for more JSON types like whole collection of pipelines, single knowledge piece, etc.
6062
+ * Note: In contrast to JSON.stringify, this function ensures that **embedding index** is on single line
6063
+ */
6064
+ function stringifyPipelineJson(pipeline) {
6065
+ var pipelineJsonStringified = JSON.stringify(pipeline, null, 4);
6066
+ for (var i = 0; i < LOOP_LIMIT; i++) {
6067
+ pipelineJsonStringified = pipelineJsonStringified.replace(/(-?0\.\d+),[\n\s]+(-?0\.\d+)/gms, "$1".concat(REPLACING_NONCE, "$2"));
6068
+ }
6069
+ pipelineJsonStringified = pipelineJsonStringified.split(REPLACING_NONCE).join(', ');
6070
+ pipelineJsonStringified += '\n';
6071
+ return pipelineJsonStringified;
6072
+ }
6073
+ /**
6074
+ * TODO: [🐝] Not working properly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
6075
+ * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
6076
+ * TODO: [🧠] Maybe more elegant solution than replacing via regex
6077
+ * TODO: [🍙] Make some standard order of JSON properties
6078
+ */
6079
+
6080
+ /**
6081
+ * Function usageToWorktime will take usage and estimate saved worktime in hours of reading / writing
6082
+ *
6083
+ * Note: This is an estimate based on these sources:
6084
+ * - https://jecas.cz/doba-cteni
6085
+ * - https://www.originalnitonery.cz/blog/psani-vsemi-deseti-se-muzete-naucit-i-sami-doma
6086
+ */
6087
+ function usageToWorktime(usage) {
6088
+ var value = usage.input.wordsCount.value / (200 /* words per minute */ * 60) +
6089
+ usage.output.wordsCount.value / (40 /* words per minute */ * 60);
6090
+ var isUncertain = usage.input.wordsCount.isUncertain || usage.output.wordsCount.isUncertain;
6091
+ var uncertainNumber = { value: value };
6092
+ if (isUncertain === true) {
6093
+ uncertainNumber.isUncertain = true;
6094
+ }
6095
+ return uncertainNumber;
6096
+ }
6097
+
6098
+ /**
6099
+ * Function `usageToHuman` will take usage and convert it to human readable report
6100
+ */
6101
+ function usageToHuman(usage) {
6102
+ var report = 'Usage:';
6103
+ var uncertainNumberToHuman = function (_a) {
6104
+ var value = _a.value, isUncertain = _a.isUncertain;
6105
+ return "".concat(isUncertain ? 'approximately ' : '').concat(Math.round(value * 100) / 100);
6106
+ };
6107
+ report += '\n' + "- Cost ".concat(uncertainNumberToHuman(usage.price), " USD");
6108
+ report += '\n' + "- Saved ".concat(uncertainNumberToHuman(usageToWorktime(usage)), " hours of human time");
6109
+ return spaceTrim(report);
6110
+ }
6111
+ /**
6112
+ * TODO: Use "$1" not "1 USD"
6113
+ * TODO: Use markdown formatting like "Cost approximately **$1**"
6114
+ * TODO: Report in minutes, seconds, days NOT 0.1 hours
6115
+ * TODO: [🧠] Maybe make from `uncertainNumberToHuman` separate exported utility
6116
+ * TODO: When negligible usage, report "Negligible" or just don't report it
6117
+ * TODO: [🧠] Maybe use "~" instead of "approximately"
6118
+ * TODO: [🏛] Maybe make some markdown builder
6119
+ */
6120
+
5960
6121
  /**
5961
6122
  * This error type indicates that you try to use a feature that is not available in the current environment
5962
6123
  */
@@ -7153,6 +7314,7 @@ function createLlmToolsFromEnv(options) {
7153
7314
  * TODO: [🧠] Is there some meaningful way to test this util
7154
7315
  * TODO: [🧠] Maybe pass env as argument
7155
7316
  * Note: [🟢] This code should never be published outside of `@promptbook/node`
7317
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
7156
7318
  */
7157
7319
 
7158
7320
  /**
@@ -7220,14 +7382,17 @@ function $currentDate() {
7220
7382
  /**
7221
7383
  * Intercepts LLM tools and counts total usage of the tools
7222
7384
  *
7223
- * @param llmTools LLM tools to be intercepted with usage counting
7385
+ * Note: It can take extended `LlmExecutionTools` and cache the
7386
+ *
7387
+ * @param llmTools LLM tools to be intercepted with usage counting, it can contain extra methods like `totalUsage`
7224
7388
  * @returns LLM tools with same functionality with added total cost counting
7225
7389
  */
7226
7390
  function cacheLlmTools(llmTools, options) {
7227
7391
  var _this = this;
7228
7392
  if (options === void 0) { options = {}; }
7229
7393
  var _a = options.storage, storage = _a === void 0 ? new MemoryStorage() : _a;
7230
- var proxyTools = {
7394
+ var proxyTools = __assign(__assign({}, llmTools), {
7395
+ // <- TODO: !!!!!! Is this working?
7231
7396
  get title() {
7232
7397
  // TODO: [🧠] Maybe put here some suffix
7233
7398
  return llmTools.title;
@@ -7235,12 +7400,10 @@ function cacheLlmTools(llmTools, options) {
7235
7400
  get description() {
7236
7401
  // TODO: [🧠] Maybe put here some suffix
7237
7402
  return llmTools.description;
7238
- },
7239
- listModels: function () {
7403
+ }, listModels: function () {
7240
7404
  // TODO: [🧠] Should be model listing also cached?
7241
7405
  return /* not await */ llmTools.listModels();
7242
- },
7243
- };
7406
+ } });
7244
7407
  var callCommonModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
7245
7408
  var key, cacheItem, promptResult, _a;
7246
7409
  return __generator(this, function (_b) {
@@ -7313,10 +7476,11 @@ function cacheLlmTools(llmTools, options) {
7313
7476
  }
7314
7477
  /**
7315
7478
  * TODO: [🔼] !!! Export via `@promptbook/core`
7316
- * TODO: @@@ write discussion about this and storages
7317
- * write how to combine multiple interceptors
7318
7479
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
7319
7480
  * TODO: [🧠] Is there some meaningful way to test this util
7481
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
7482
+ * @@@ write discussion about this and storages
7483
+ * @@@ write how to combine multiple interceptors
7320
7484
  */
7321
7485
 
7322
7486
  /**
@@ -7328,34 +7492,15 @@ function getLlmToolsForCli() {
7328
7492
  if (!isRunningInNode()) {
7329
7493
  throw new EnvironmentMismatchError('Function `getLlmToolsForTestingAndScriptsAndPlayground` works only in Node.js environment');
7330
7494
  }
7331
- return cacheLlmTools(createLlmToolsFromEnv(), {
7495
+ return cacheLlmTools(countTotalUsage(
7496
+ // <- Note: for example here we don`t want the [🌯]
7497
+ createLlmToolsFromEnv()), {
7332
7498
  storage: new FilesStorage({ cacheFolderPath: join(process.cwd(), EXECUTIONS_CACHE_DIRNAME) }),
7333
7499
  });
7334
7500
  }
7335
7501
  /**
7336
7502
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
7337
- */
7338
-
7339
- /**
7340
- * Stringify the PipelineJson with proper formatting
7341
- *
7342
- * Note: [0] It can be used for more JSON types like whole collection of pipelines, single knowledge piece, etc.
7343
- * Note: In contrast to JSON.stringify, this function ensures that **embedding index** is on single line
7344
- */
7345
- function stringifyPipelineJson(pipeline) {
7346
- var pipelineJsonStringified = JSON.stringify(pipeline, null, 4);
7347
- for (var i = 0; i < LOOP_LIMIT; i++) {
7348
- pipelineJsonStringified = pipelineJsonStringified.replace(/(-?0\.\d+),[\n\s]+(-?0\.\d+)/gms, "$1".concat(REPLACING_NONCE, "$2"));
7349
- }
7350
- pipelineJsonStringified = pipelineJsonStringified.split(REPLACING_NONCE).join(', ');
7351
- pipelineJsonStringified += '\n';
7352
- return pipelineJsonStringified;
7353
- }
7354
- /**
7355
- * TODO: [🐝] Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
7356
- * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
7357
- * TODO: [🧠] Maybe more elegant solution than replacing via regex
7358
- * TODO: [🍙] Make some standart order of json properties
7503
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
7359
7504
  */
7360
7505
 
7361
7506
  /**
@@ -7506,6 +7651,11 @@ function initializeMakeCommand(program) {
7506
7651
  _f.sent();
7507
7652
  _f.label = 23;
7508
7653
  case 23:
7654
+ if (isVerbose) {
7655
+ // TODO: !!!!!! Test that this works
7656
+ console.info(colors.green("Collection builded"));
7657
+ console.info(colors.cyan(usageToHuman(llmTools.totalUsage)));
7658
+ }
7509
7659
  process.exit(0);
7510
7660
  return [2 /*return*/];
7511
7661
  }