@promptbook/cli 0.61.0 → 0.62.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/esm/index.es.js +259 -88
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/promptbook-collection/index.d.ts +3 -3
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -1
  5. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
  6. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -0
  7. package/esm/typings/src/execution/utils/usageToHuman.d.ts +15 -0
  8. package/esm/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
  9. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +4 -0
  10. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +12 -2
  11. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +12 -2
  12. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +7 -1
  13. package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
  14. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +15 -0
  15. package/{umd/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → esm/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
  16. package/esm/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
  17. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  19. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  20. package/esm/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
  21. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  22. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  23. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  24. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  25. package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
  26. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  27. package/package.json +4 -4
  28. package/umd/index.umd.js +281 -92
  29. package/umd/index.umd.js.map +1 -1
  30. package/umd/typings/promptbook-collection/index.d.ts +3 -3
  31. package/umd/typings/src/_packages/core.index.d.ts +2 -1
  32. package/umd/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
  33. package/umd/typings/src/execution/createPipelineExecutor.d.ts +1 -0
  34. package/umd/typings/src/execution/utils/usageToHuman.d.ts +15 -0
  35. package/umd/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
  36. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +4 -0
  37. package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +12 -2
  38. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +12 -2
  39. package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +7 -1
  40. package/umd/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
  41. package/umd/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +15 -0
  42. package/{esm/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → umd/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
  43. package/umd/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
  44. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  45. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  46. package/umd/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  47. package/umd/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
  48. package/umd/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  49. package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  50. package/umd/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  51. package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  52. package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
  53. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  54. package/esm/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +0 -11
  55. package/umd/typings/src/llm-providers/_common/utils/count-total-cost/LlmExecutionToolsWithTotalCost.d.ts +0 -11
package/umd/index.umd.js CHANGED
@@ -1,17 +1,36 @@
  (function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('commander'), require('spacetrim'), require('colors'), require('waitasecond'), require('fs/promises'), require('path'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('@anthropic-ai/sdk'), require('openai'), require('glob-promise')) :
- typeof define === 'function' && define.amd ? define(['exports', 'commander', 'spacetrim', 'colors', 'waitasecond', 'fs/promises', 'path', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', '@anthropic-ai/sdk', 'openai', 'glob-promise'], factory) :
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.commander, global.spaceTrim, global.colors, global.waitasecond, global.promises, global.path, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.Anthropic, global.OpenAI, global.glob));
- })(this, (function (exports, commander, spaceTrim, colors, waitasecond, promises, path, prettier, parserHtml, hexEncoder, sha256, Anthropic, OpenAI, glob) { 'use strict';
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('commander'), require('spacetrim'), require('colors'), require('waitasecond'), require('fs/promises'), require('path'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('dotenv'), require('@anthropic-ai/sdk'), require('openai'), require('glob-promise')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'commander', 'spacetrim', 'colors', 'waitasecond', 'fs/promises', 'path', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'dotenv', '@anthropic-ai/sdk', 'openai', 'glob-promise'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.commander, global.spaceTrim, global.colors, global.waitasecond, global.promises, global.path, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.dotenv, global.Anthropic, global.OpenAI, global.glob));
+ })(this, (function (exports, commander, spaceTrim, colors, waitasecond, promises, path, prettier, parserHtml, hexEncoder, sha256, dotenv, Anthropic, OpenAI, glob) { 'use strict';
 
  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
 
+ function _interopNamespace(e) {
+ if (e && e.__esModule) return e;
+ var n = Object.create(null);
+ if (e) {
+ Object.keys(e).forEach(function (k) {
+ if (k !== 'default') {
+ var d = Object.getOwnPropertyDescriptor(e, k);
+ Object.defineProperty(n, k, d.get ? d : {
+ enumerable: true,
+ get: function () { return e[k]; }
+ });
+ }
+ });
+ }
+ n["default"] = e;
+ return Object.freeze(n);
+ }
+
  var commander__default = /*#__PURE__*/_interopDefaultLegacy(commander);
  var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim);
  var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
  var parserHtml__default = /*#__PURE__*/_interopDefaultLegacy(parserHtml);
  var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
  var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
+ var dotenv__namespace = /*#__PURE__*/_interopNamespace(dotenv);
  var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
  var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
  var glob__default = /*#__PURE__*/_interopDefaultLegacy(glob);
@@ -154,7 +173,7 @@
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0-30';
+ var PROMPTBOOK_VERSION = '0.62.0-0';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /**
@@ -556,7 +575,8 @@
  /**
  * TODO: !!!! Implement new features and commands into `promptTemplateParameterJsonToString`
  * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
- * TODO: Escape all
+ * TODO: [🏛] Maybe make some markdown builder
+ * TODO: [🏛] Escape all
  */
 
  /**
@@ -751,7 +771,7 @@
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, 
write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title 
for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2130,6 +2150,9 @@
  }
  return new (MultipleLlmExecutionTools.bind.apply(MultipleLlmExecutionTools, __spreadArray([void 0], __read(llmExecutionTools), false)))();
  }
+ /**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
 
  /**
  * Determine if the pipeline is fully prepared
@@ -3252,6 +3275,7 @@
  }
  /**
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
+ * TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipiline to report
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
@@ -3460,6 +3484,86 @@
  * [ ] One piece can have multiple sources
  */
 
+ /**
+ * Intercepts LLM tools and counts total usage of the tools
+ *
+ * @param llmTools LLM tools to be intercepted with usage counting
+ * @returns LLM tools with same functionality with added total cost counting
+ */
+ function countTotalUsage(llmTools) {
+ var _this = this;
+ var totalUsage = ZERO_USAGE;
+ var proxyTools = {
+ get title() {
+ // TODO: [🧠] Maybe put here some suffix
+ return llmTools.title;
+ },
+ get description() {
+ // TODO: [🧠] Maybe put here some suffix
+ return llmTools.description;
+ },
+ listModels: function () {
+ return /* not await */ llmTools.listModels();
+ },
+ getTotalUsage: function () {
+ // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
+ return totalUsage;
+ },
+ };
+ if (llmTools.callChatModel !== undefined) {
+ proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callChatModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ if (llmTools.callCompletionModel !== undefined) {
+ proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ if (llmTools.callEmbeddingModel !== undefined) {
+ proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+ var promptResult;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
+ case 1:
+ promptResult = _a.sent();
+ totalUsage = addUsage(totalUsage, promptResult.usage);
+ return [2 /*return*/, promptResult];
+ }
+ });
+ }); };
+ }
+ // <- Note: [🤖]
+ return proxyTools;
+ }
+ /**
+ * TODO: [🔼] !!! Export via `@promptbookcore/`
+ * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
+ * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
+
  /**
  * Prepares the persona for the pipeline
  *
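The new `countTotalUsage` interceptor wraps any `LlmExecutionTools` and adds up the `usage` of every call via `addUsage`. A minimal sketch of the intended use; the `OpenAiExecutionTools` construction and `prompt` value are illustrative, not taken from this diff:

```js
// Wrap the tools; each callChatModel / callCompletionModel / callEmbeddingModel
// call adds its promptResult.usage to a running total:
const llmTools = countTotalUsage(
    new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY }),
);

await llmTools.callChatModel(prompt);

// Exposed as a method rather than a `totalUsage` getter, so the object can still be proxied:
const totalUsage = llmTools.getTotalUsage();
```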
@@ -3583,23 +3687,24 @@
  */
  function preparePipeline(pipeline, options) {
  return __awaiter(this, void 0, void 0, function () {
- var _a, maxParallelCount, parameters, promptTemplates,
+ var llmTools, _a, maxParallelCount, _b, isVerbose, parameters, promptTemplates,
  /*
  <- TODO: [🧠][0] `promptbookVersion` */
  knowledgeSources /*
  <- TODO: [🧊] `knowledgePieces` */, personas /*
- <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
+ <- TODO: [🧊] `preparations` */, llmToolsWithUsage, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
  var _this = this;
- return __generator(this, function (_b) {
- switch (_b.label) {
+ return __generator(this, function (_c) {
+ switch (_c.label) {
  case 0:
- _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
+ llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
  parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+ llmToolsWithUsage = countTotalUsage(llmTools);
  currentPreparation = {
  id: 1,
  // TODO: [🍥]> date: $currentDate(),
  promptbookVersion: PROMPTBOOK_VERSION,
- modelUsage: ZERO_USAGE,
+ usage: ZERO_USAGE,
  };
  preparations = [
  // ...preparations
@@ -3611,7 +3716,11 @@
  var modelRequirements, preparedPersona;
  return __generator(this, function (_a) {
  switch (_a.label) {
- case 0: return [4 /*yield*/, preparePersona(persona.description, options)];
+ case 0: return [4 /*yield*/, preparePersona(persona.description, {
+ llmTools: llmToolsWithUsage,
+ maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
+ isVerbose: isVerbose,
+ })];
  case 1:
  modelRequirements = _a.sent();
  preparedPersona = __assign(__assign({}, persona), { modelRequirements: modelRequirements, preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] });
@@ -3621,20 +3730,30 @@
  });
  }); })];
  case 1:
- _b.sent();
+ _c.sent();
  knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
- return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, options)];
+ return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, {
+ llmTools: llmToolsWithUsage,
+ maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
+ isVerbose: isVerbose,
+ })];
  case 2:
- partialknowledgePiecesPrepared = _b.sent();
+ partialknowledgePiecesPrepared = _c.sent();
  knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
  return [4 /*yield*/, prepareTemplates({
  parameters: parameters,
  promptTemplates: promptTemplates,
  knowledgePiecesCount: knowledgePiecesPrepared.length,
- }, options)];
+ }, {
+ llmTools: llmToolsWithUsage,
+ maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
+ isVerbose: isVerbose,
+ })];
  case 3:
- promptTemplatesPrepared = (_b.sent()).promptTemplatesPrepared;
+ promptTemplatesPrepared = (_c.sent()).promptTemplatesPrepared;
  // ----- /Templates preparation -----
+ // Note: Count total usage
+ currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
  return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
  }
  });
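Taken together, these hunks change the `preparePipeline` contract: `llmTools` is now read from `options`, wrapped with `countTotalUsage`, passed down to `preparePersona`, `prepareKnowledgePieces` and `prepareTemplates`, and the accumulated total is stored on the preparation record under the renamed `usage` key (formerly `modelUsage`). A hedged sketch of a call site; the `pipeline` value and option values are assumed:

```js
const preparedPipeline = await preparePipeline(pipeline, {
    llmTools,              // <- now taken from options and wrapped with countTotalUsage
    maxParallelCount: 3,   // <- illustrative
    isVerbose: true,
});

// The (here first and only) preparation record carries the usage of the whole preparation:
console.log(preparedPipeline.preparations[0].usage);
```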
@@ -3645,7 +3764,6 @@
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
- * TODO: [🎐] !!!!! Use here countTotalUsage
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */
 
@@ -5961,6 +6079,69 @@
  * Note: [🟢] This code should never be published outside of `@pipeline/node`
  */
 
+ /**
+ * Stringify the PipelineJson with proper formatting
+ *
+ * Note: [0] It can be used for more JSON types like whole collection of pipelines, single knowledge piece, etc.
+ * Note: In contrast to JSON.stringify, this function ensures that **embedding index** is on single line
+ */
+ function stringifyPipelineJson(pipeline) {
+ var pipelineJsonStringified = JSON.stringify(pipeline, null, 4);
+ for (var i = 0; i < LOOP_LIMIT; i++) {
+ pipelineJsonStringified = pipelineJsonStringified.replace(/(-?0\.\d+),[\n\s]+(-?0\.\d+)/gms, "$1".concat(REPLACING_NONCE, "$2"));
+ }
+ pipelineJsonStringified = pipelineJsonStringified.split(REPLACING_NONCE).join(', ');
+ pipelineJsonStringified += '\n';
+ return pipelineJsonStringified;
+ }
+ /**
+ * TODO: [🐝] Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
+ * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
+ * TODO: [🧠] Maybe more elegant solution than replacing via regex
+ * TODO: [🍙] Make some standart order of json properties
+ */
+
+ /**
+ * Function usageToWorktime will take usage and estimate saved worktime in hours of reading / writing
+ *
+ * Note: This is an estimate based of theese sources:
+ * - https://jecas.cz/doba-cteni
+ * - https://www.originalnitonery.cz/blog/psani-vsemi-deseti-se-muzete-naucit-i-sami-doma
+ */
+ function usageToWorktime(usage) {
+ var value = usage.input.wordsCount.value / (200 /* words per minute */ * 60) +
+ usage.output.wordsCount.value / (40 /* words per minute */ * 60);
+ var isUncertain = usage.input.wordsCount.isUncertain || usage.output.wordsCount.isUncertain;
+ var uncertainNumber = { value: value };
+ if (isUncertain === true) {
+ uncertainNumber.isUncertain = true;
+ }
+ return uncertainNumber;
+ }
+
+ /**
+ * Function `usageToHuman` will take usage and convert it to human readable report
+ */
+ function usageToHuman(usage) {
+ var report = 'Usage:';
+ var uncertainNumberToHuman = function (_a) {
+ var value = _a.value, isUncertain = _a.isUncertain;
+ return "".concat(isUncertain ? 'approximately ' : '').concat(Math.round(value * 100) / 100);
+ };
+ report += '\n' + "- Cost ".concat(uncertainNumberToHuman(usage.price), " USD");
+ report += '\n' + "- Saved ".concat(uncertainNumberToHuman(usageToWorktime(usage)), " hours of human time");
+ return spaceTrim__default["default"](report);
+ }
+ /**
+ * TODO: Use "$1" not "1 USD"
+ * TODO: Use markdown formatting like "Cost approximately **$1**"
+ * TODO: Report in minutes, seconds, days NOT 0.1 hours
+ * TODO: [🧠] Maybe make from `uncertainNumberToHuman` separate exported utility
+ * TODO: When negligible usage, report "Negligible" or just don't report it
+ * TODO: [🧠] Maybe use "~" instead of "approximately"
+ * TODO: [🏛] Maybe make some markdown builder
+ */
+
  /**
  * This error type indicates that you try to use a feature that is not available in the current environment
  */
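What the two new usage utilities compute, sketched on an invented usage object (only the fields these functions read are shown; `usageToWorktime` assumes 200 words per minute of reading and 40 words per minute of writing, returning hours):

```js
const usage = {
    price: { value: 0.42 },
    input: { wordsCount: { value: 12000 } },                    // 12000 / (200 * 60) = 1.0 h of reading
    output: { wordsCount: { value: 3600, isUncertain: true } }, // 3600 / (40 * 60)   = 1.5 h of writing
};

usageToWorktime(usage); // -> { value: 2.5, isUncertain: true }

usageToHuman(usage);
// -> Usage:
//    - Cost 0.42 USD
//    - Saved approximately 2.5 hours of human time
```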
@@ -6040,7 +6221,7 @@
  switch (_a.label) {
  case 0:
  filename = this.getFilenameForKey(key);
- fileContent = JSON.stringify(value, null, 4);
+ fileContent = stringifyPipelineJson(value);
  return [4 /*yield*/, promises.mkdir(path.dirname(filename), { recursive: true })];
  case 1:
  _a.sent(); // <- [0]
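The switch away from plain `JSON.stringify(value, null, 4)` matters for cached embedding results: `stringifyPipelineJson` repeatedly joins adjacent `0.xxx` numbers so an embedding vector does not explode into one line per component. A sketch of the difference (it relies on the `LOOP_LIMIT` and `REPLACING_NONCE` constants defined elsewhere in the bundle; indentation approximate):

```js
stringifyPipelineJson({ index: [0.12, -0.34, 0.56] });
// {
//     "index": [
//         0.12, -0.34, 0.56
//     ]
// }
// ...where plain JSON.stringify(..., null, 4) would print each number on its own line.
```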
@@ -6107,7 +6288,7 @@
  * @private utility for initializating UncertainNumber
  */
  function uncertainNumber(value) {
- if (value === null || value === undefined || Number.isNaN(NaN)) {
+ if (value === null || value === undefined || Number.isNaN(value)) {
  return { value: 0, isUncertain: true };
  }
  return { value: value };
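This one-character change fixes a real bug: `Number.isNaN(NaN)` is always `true`, so the old condition held unconditionally and every input, even a perfectly good number, came back as `{ value: 0, isUncertain: true }`. After the fix:

```js
uncertainNumber(42);        // -> { value: 42 }
uncertainNumber(NaN);       // -> { value: 0, isUncertain: true }
uncertainNumber(undefined); // -> { value: 0, isUncertain: true }
```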
@@ -6421,6 +6602,7 @@
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🍜] Auto use anonymous server in browser
  */
 
  /**
@@ -7114,6 +7296,8 @@
  *
  * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
  *
+ * @@@ .env
+ *
  * It looks for environment variables:
  * - `process.env.OPENAI_API_KEY`
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -7126,6 +7310,7 @@
  throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
  }
  var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+ dotenv__namespace.config(); // <- TODO: !!!!!! Double check [🟢]
  var llmTools = [];
  if (typeof process.env.OPENAI_API_KEY === 'string') {
  llmTools.push(new OpenAiExecutionTools({
@@ -7150,6 +7335,7 @@
  }
  }
  /**
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
  * TODO: [🔼] !!! Export via `@promptbook/node`
  * TODO: @@@ write discussion about this - wizzard
  * TODO: Add Azure
@@ -7157,6 +7343,7 @@
  * TODO: [🧠] Is there some meaningfull way how to test this util
  * TODO: [🧠] Maybe pass env as argument
  * Note: [🟢] This code should never be published outside of `@promptbook/node`
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
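With the new `dotenv.config()` call (the `dotenv` dependency added at the top of this bundle), `createLlmToolsFromEnv` now picks up a `.env` file from the working directory on its own, so the API keys no longer have to be exported in the shell. A sketch; the file contents are illustrative:

```js
// .env (in the current working directory):
//   OPENAI_API_KEY=sk-...
//   ANTHROPIC_CLAUDE_API_KEY=...

const llmTools = createLlmToolsFromEnv({ isVerbose: true });
// -> joined execution tools, one per provider whose key was found
```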
 
  /**
@@ -7224,14 +7411,17 @@
  /**
  * Intercepts LLM tools and counts total usage of the tools
  *
- * @param llmTools LLM tools to be intercepted with usage counting
+ * Note: It can take extended `LlmExecutionTools` and cache the
+ *
+ * @param llmTools LLM tools to be intercepted with usage counting, it can contain extra methods like `totalUsage`
  * @returns LLM tools with same functionality with added total cost counting
  */
  function cacheLlmTools(llmTools, options) {
  var _this = this;
  if (options === void 0) { options = {}; }
- var _a = options.storage, storage = _a === void 0 ? new MemoryStorage() : _a;
- var proxyTools = {
+ var _a = options.storage, storage = _a === void 0 ? new MemoryStorage() : _a, _b = options.isReloaded, isReloaded = _b === void 0 ? false : _b;
+ var proxyTools = __assign(__assign({}, llmTools), {
+ // <- Note: [🥫]
  get title() {
  // TODO: [🧠] Maybe put here some suffix
  return llmTools.title;
@@ -7239,54 +7429,59 @@
  get description() {
  // TODO: [🧠] Maybe put here some suffix
  return llmTools.description;
- },
- listModels: function () {
+ }, listModels: function () {
  // TODO: [🧠] Should be model listing also cached?
  return /* not await */ llmTools.listModels();
- },
- };
+ } });
  var callCommonModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
- var key, cacheItem, promptResult, _a;
- return __generator(this, function (_b) {
- switch (_b.label) {
+ var key, cacheItem, _a, promptResult, _b;
+ return __generator(this, function (_c) {
+ switch (_c.label) {
  case 0:
  key = titleToName(prompt.title.substring(0, MAX_FILENAME_LENGTH - 10) +
  '-' +
  sha256__default["default"](hexEncoder__default["default"].parse(JSON.stringify(prompt.parameters))).toString( /* hex */));
+ if (!!isReloaded) return [3 /*break*/, 2];
  return [4 /*yield*/, storage.getItem(key)];
  case 1:
- cacheItem = _b.sent();
+ _a = _c.sent();
+ return [3 /*break*/, 3];
+ case 2:
+ _a = null;
+ _c.label = 3;
+ case 3:
+ cacheItem = _a;
  if (cacheItem) {
  return [2 /*return*/, cacheItem.promptResult];
  }
- _a = prompt.modelRequirements.modelVariant;
- switch (_a) {
- case 'CHAT': return [3 /*break*/, 2];
- case 'COMPLETION': return [3 /*break*/, 4];
- case 'EMBEDDING': return [3 /*break*/, 6];
+ _b = prompt.modelRequirements.modelVariant;
+ switch (_b) {
+ case 'CHAT': return [3 /*break*/, 4];
+ case 'COMPLETION': return [3 /*break*/, 6];
+ case 'EMBEDDING': return [3 /*break*/, 8];
  }
- return [3 /*break*/, 8];
- case 2: return [4 /*yield*/, llmTools.callChatModel(prompt)];
- case 3:
- promptResult = _b.sent();
- return [3 /*break*/, 9];
- case 4: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
+ return [3 /*break*/, 10];
+ case 4: return [4 /*yield*/, llmTools.callChatModel(prompt)];
  case 5:
- promptResult = _b.sent();
- return [3 /*break*/, 9];
- case 6: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
+ promptResult = _c.sent();
+ return [3 /*break*/, 11];
+ case 6: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
  case 7:
- promptResult = _b.sent();
- return [3 /*break*/, 9];
- case 8: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
- case 9: return [4 /*yield*/, storage.setItem(key, {
+ promptResult = _c.sent();
+ return [3 /*break*/, 11];
+ case 8: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
+ case 9:
+ promptResult = _c.sent();
+ return [3 /*break*/, 11];
+ case 10: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
+ case 11: return [4 /*yield*/, storage.setItem(key, {
  date: $currentDate(),
  promptbookVersion: PROMPTBOOK_VERSION,
  prompt: prompt,
  promptResult: promptResult,
  })];
- case 10:
- _b.sent();
+ case 12:
+ _c.sent();
  return [2 /*return*/, promptResult];
  }
  });
@@ -7317,10 +7512,11 @@
  }
  /**
  * TODO: [🔼] !!! Export via `@promptbook/core`
- * TODO: @@@ write discussion about this and storages
- * write how to combine multiple interceptors
  * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
  * TODO: [🧠] Is there some meaningfull way how to test this util
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ * @@@ write discussion about this and storages
+ * @@@ write how to combine multiple interceptors
  */
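The new `isReloaded` option is a cache-busting escape hatch: when `true`, the cache read is skipped (`cacheItem` is forced to `null`), the underlying model is always called, and the fresh result is still written back to storage. A sketch; the storage choice is illustrative:

```js
const llmTools = cacheLlmTools(createLlmToolsFromEnv(), {
    storage: new FilesStorage({ cacheFolderPath: './promptbook-cache' }),
    isReloaded: true, // <- skip cache reads but keep writing fresh results
});
```

Note also that `proxyTools` now spreads the wrapped object (`__assign({}, llmTools)`), so extra methods such as `getTotalUsage` survive the cache layer.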
 
  /**
@@ -7328,38 +7524,21 @@
  *
  * @private within the repository - for CLI utils
  */
- function getLlmToolsForCli() {
+ function getLlmToolsForCli(options) {
  if (!isRunningInNode()) {
  throw new EnvironmentMismatchError('Function `getLlmToolsForTestingAndScriptsAndPlayground` works only in Node.js environment');
  }
- return cacheLlmTools(createLlmToolsFromEnv(), {
+ var _a = (options !== null && options !== void 0 ? options : {}).isCacheReloaded, isCacheReloaded = _a === void 0 ? false : _a;
+ return cacheLlmTools(countTotalUsage(
+ // <- Note: for example here we don`t want the [🌯]
+ createLlmToolsFromEnv()), {
  storage: new FilesStorage({ cacheFolderPath: path.join(process.cwd(), EXECUTIONS_CACHE_DIRNAME) }),
+ isReloaded: isCacheReloaded,
  });
  }
  /**
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
- */
-
- /**
- * Stringify the PipelineJson with proper formatting
- *
- * Note: [0] It can be used for more JSON types like whole collection of pipelines, single knowledge piece, etc.
- * Note: In contrast to JSON.stringify, this function ensures that **embedding index** is on single line
- */
- function stringifyPipelineJson(pipeline) {
- var pipelineJsonStringified = JSON.stringify(pipeline, null, 4);
- for (var i = 0; i < LOOP_LIMIT; i++) {
- pipelineJsonStringified = pipelineJsonStringified.replace(/(-?0\.\d+),[\n\s]+(-?0\.\d+)/gms, "$1".concat(REPLACING_NONCE, "$2"));
- }
- pipelineJsonStringified = pipelineJsonStringified.split(REPLACING_NONCE).join(', ');
- pipelineJsonStringified += '\n';
- return pipelineJsonStringified;
- }
- /**
- * TODO: [🐝] Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
- * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
- * TODO: [🧠] Maybe more elegant solution than replacing via regex
- * TODO: [🍙] Make some standart order of json properties
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
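`getLlmToolsForCli` is now a three-layer composition, `cacheLlmTools(countTotalUsage(createLlmToolsFromEnv()))`, with caching outermost, so cache hits never reach the usage counter and the reported cost covers only real model calls. A sketch of the CLI-side use:

```js
const llmTools = getLlmToolsForCli({ isCacheReloaded: true }); // <- force fresh model calls

// ...run some pipeline work...

// getTotalUsage() passes through the cache wrapper thanks to the spread in cacheLlmTools:
console.info(usageToHuman(llmTools.getTotalUsage()));
```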
 
  /**
@@ -7369,24 +7548,26 @@
  */
  function initializeMakeCommand(program) {
  var _this = this;
- var helloCommand = program.command('make');
- helloCommand.description(spaceTrim__default["default"]("\n Makes a new pipeline collection in given folder\n "));
- helloCommand.argument('<path>', 'Path to promptbook directory');
- helloCommand.option('--project-name', "Name of the project for whom collection is", 'Project');
- helloCommand.option('-f, --format <format>', spaceTrim__default["default"]("\n Output format of builded collection \"javascript\", \"typescript\" or \"json\"\n\n Note: You can use multiple formats separated by comma\n "), 'javascript' /* <- Note: [🏳‍🌈] */);
- helloCommand.option('--no-validation', "Do not validate logic of pipelines in collection", true);
- helloCommand.option('--validation', "Types of validations separated by comma (options \"logic\",\"imports\")", 'logic,imports');
- helloCommand.option('--verbose', "Is verbose", false);
- helloCommand.option('-o, --out-file <path>', spaceTrim__default["default"]("\n Where to save the builded collection\n\n Note: If you keep it \"".concat(PIPELINE_COLLECTION_BASE_FILENAME, "\" it will be saved in the root of the promptbook directory\n If you set it to a path, it will be saved in that path\n BUT you can use only one format and set correct extension\n ")), PIPELINE_COLLECTION_BASE_FILENAME);
- helloCommand.action(function (path$1, _a) {
- var projectName = _a.projectName, format = _a.format, validation = _a.validation, verbose = _a.verbose, outFile = _a.outFile;
+ var makeCommand = program.command('make');
+ makeCommand.description(spaceTrim__default["default"]("\n Makes a new pipeline collection in given folder\n "));
+ makeCommand.argument('<path>', 'Path to promptbook directory');
+ makeCommand.option('--project-name', "Name of the project for whom collection is", 'Project');
+ makeCommand.option('-f, --format <format>', spaceTrim__default["default"]("\n Output format of builded collection \"javascript\", \"typescript\" or \"json\"\n\n Note: You can use multiple formats separated by comma\n "), 'javascript' /* <- Note: [🏳‍🌈] */);
+ makeCommand.option('--no-validation', "Do not validate logic of pipelines in collection", true);
+ makeCommand.option('--validation', "Types of validations separated by comma (options \"logic\",\"imports\")", 'logic,imports');
+ makeCommand.option('--reload-cache', "Use LLM models even if cached ", false);
+ makeCommand.option('--verbose', "Is verbose", false);
+ makeCommand.option('-o, --out-file <path>', spaceTrim__default["default"]("\n Where to save the builded collection\n\n Note: If you keep it \"".concat(PIPELINE_COLLECTION_BASE_FILENAME, "\" it will be saved in the root of the promptbook directory\n If you set it to a path, it will be saved in that path\n BUT you can use only one format and set correct extension\n ")), PIPELINE_COLLECTION_BASE_FILENAME);
+ makeCommand.action(function (path$1, _a) {
+ var projectName = _a.projectName, format = _a.format, validation = _a.validation, reloadCache = _a.reloadCache, verbose = _a.verbose, outFile = _a.outFile;
  return __awaiter(_this, void 0, void 0, function () {
- var isVerbose, formats, validations, llmTools, collection, validations_1, validations_1_1, validation_1, _b, _c, pipelineUrl, pipeline, e_1_1, e_2_1, collectionJson, collectionJsonString, saveFile;
+ var isCacheReloaded, isVerbose, formats, validations, llmTools, collection, validations_1, validations_1_1, validation_1, _b, _c, pipelineUrl, pipeline, e_1_1, e_2_1, collectionJson, collectionJsonString, saveFile;
  var e_2, _d, e_1, _e;
  var _this = this;
  return __generator(this, function (_f) {
  switch (_f.label) {
  case 0:
+ isCacheReloaded = reloadCache;
  isVerbose = verbose;
  formats = (format || '')
  .split(',')
@@ -7400,11 +7581,14 @@
  console.error(colors__default["default"].red("You can use only one format when saving to a file"));
  process.exit(1);
  }
- llmTools = getLlmToolsForCli();
+ llmTools = getLlmToolsForCli({
+ isCacheReloaded: isCacheReloaded,
+ });
  return [4 /*yield*/, createCollectionFromDirectory(path$1, {
  llmTools: llmTools,
  isVerbose: isVerbose,
  isRecursive: true,
+ // <- TODO: [🍖] isCacheReloaded
  })];
  case 1:
  collection = _f.sent();
@@ -7510,6 +7694,11 @@
  _f.sent();
  _f.label = 23;
  case 23:
+ if (isVerbose) {
+ // TODO: !!!!!! Test that this works
+ console.info(colors__default["default"].green("Collection builded"));
+ console.info(colors__default["default"].cyan(usageToHuman(llmTools.getTotalUsage())));
+ }
  process.exit(0);
  return [2 /*return*/];
  }
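End to end, the renamed `makeCommand` wires the new pieces together: `--reload-cache` sets `isCacheReloaded`, which flows into `getLlmToolsForCli` and on into `cacheLlmTools`, and with `--verbose` the build now finishes by printing the `usageToHuman` cost report, e.g. something like `promptbook make ./promptbook-collection --reload-cache --verbose` (exact bin name assumed, not shown in this diff).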