@promptbook/node 0.61.0 → 0.62.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/esm/index.es.js +114 -15
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +2 -1
  4. package/esm/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
  5. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -0
  6. package/esm/typings/src/execution/utils/usageToHuman.d.ts +15 -0
  7. package/esm/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
  8. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +1 -0
  9. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +3 -2
  10. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
  11. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +1 -1
  12. package/esm/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
  13. package/esm/typings/src/llm-providers/_common/utils/{count-total-cost/LlmExecutionToolsWithTotalCost.d.ts → count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts} +5 -2
  14. package/{umd/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → esm/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
  15. package/esm/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
  16. package/esm/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
  17. package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
  18. package/package.json +3 -3
  19. package/umd/index.umd.js +114 -15
  20. package/umd/index.umd.js.map +1 -1
  21. package/umd/typings/src/_packages/core.index.d.ts +2 -1
  22. package/umd/typings/src/conversion/pipelineJsonToString.d.ts +2 -1
  23. package/umd/typings/src/execution/createPipelineExecutor.d.ts +1 -0
  24. package/umd/typings/src/execution/utils/usageToHuman.d.ts +15 -0
  25. package/umd/typings/src/execution/utils/usageToHuman.test.d.ts +1 -0
  26. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +1 -0
  27. package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +3 -2
  28. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
  29. package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +1 -1
  30. package/umd/typings/src/llm-providers/_common/utils/cache/cacheLlmTools.d.ts +7 -4
  31. package/umd/typings/src/llm-providers/_common/utils/{count-total-cost/LlmExecutionToolsWithTotalCost.d.ts → count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts} +5 -2
  32. package/{esm/typings/src/llm-providers/_common/utils/count-total-cost/countTotalCost.d.ts → umd/typings/src/llm-providers/_common/utils/count-total-usage/countTotalUsage.d.ts} +5 -2
  33. package/umd/typings/src/llm-providers/_common/utils/{count-total-cost/limitTotalCost.d.ts → count-total-usage/limitTotalUsage.d.ts} +8 -5
  34. package/umd/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +3 -0
  35. package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
package/esm/index.es.js CHANGED
@@ -459,7 +459,8 @@ function promptTemplateParameterJsonToString(promptTemplateParameterJson) {
 /**
  * TODO: !!!! Implement new features and commands into `promptTemplateParameterJsonToString`
  * TODO: [🧠] Is there a way to auto-detect missing features in pipelineJsonToString
- * TODO: Escape all
+ * TODO: [🏛] Maybe make some markdown builder
+ * TODO: [🏛] Escape all
  */
 
 /**
@@ -654,7 +655,7 @@ function forEachAsync(array, options, callbackfunction) {
     });
 }
 
-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-30",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-30",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
 /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
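
The only change in the inlined `PipelineCollection` above is the `promptbookVersion` stamp (`0.61.0-30` → `0.61.0`); the pipeline data itself is identical. For readers scanning the blob, here is a minimal type sketch of one collection entry, inferred purely from the literal above; the names and optionality are assumptions, and the published `@promptbook/types` definitions remain authoritative.

```ts
// Sketch of the pipeline JSON shape, inferred from the inlined data above
// (an approximation, not the published typings).
interface PipelineParameter {
    name: string;
    description: string;
    isInput: boolean;
    isOutput: boolean;
}

interface PromptTemplate {
    blockType: 'PROMPT_TEMPLATE';
    name: string;
    title: string;
    modelRequirements: { modelVariant: 'CHAT'; modelName: string };
    content: string; // <- the prompt text, with {parameterName} placeholders
    expectations?: { words?: { min?: number; max?: number } };
    expectFormat?: 'JSON';
    dependentParameterNames: string[];
    resultingParameterName: string;
}

interface PipelineJson {
    title: string;
    pipelineUrl: string;
    promptbookVersion: string; // <- '0.61.0' after this change (was '0.61.0-30')
    parameters: PipelineParameter[];
    promptTemplates: PromptTemplate[];
    knowledgeSources: unknown[];
    knowledgePieces: unknown[];
    personas: unknown[];
    preparations: Array<{ id: number; promptbookVersion: string; modelUsage: unknown }>;
    sourceFile: string;
}
```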
@@ -2033,6 +2034,9 @@ function joinLlmExecutionTools() {
     }
     return new (MultipleLlmExecutionTools.bind.apply(MultipleLlmExecutionTools, __spreadArray([void 0], __read(llmExecutionTools), false)))();
 }
+/**
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
 
 /**
  * Determine if the pipeline is fully prepared
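
The `bind.apply(..., __spreadArray([void 0], ...))` construct above is just TypeScript's ES5 down-leveling of a variadic `new` call. A de-sugared sketch of what the line amounts to, using a minimal stand-in interface rather than the real `LlmExecutionTools` type:

```ts
// Minimal stand-in for the real LlmExecutionTools interface (an assumption).
interface LlmExecutionTools {
    title: string;
    description: string;
}

// Stand-in declaration; the real class lives elsewhere in the bundle.
declare class MultipleLlmExecutionTools {
    constructor(...llmExecutionTools: LlmExecutionTools[]);
}

// Modern equivalent of the transpiled `bind.apply` call above:
function joinLlmExecutionTools(...llmExecutionTools: LlmExecutionTools[]): MultipleLlmExecutionTools {
    return new MultipleLlmExecutionTools(...llmExecutionTools);
}
```

How `MultipleLlmExecutionTools` delegates between the joined tools is outside this hunk; the change here only appends the `[👷‍♂️]` TODO comment.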
@@ -2268,7 +2272,7 @@ function union() {
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.61.0-30';
+var PROMPTBOOK_VERSION = '0.61.0';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /**
@@ -3161,6 +3165,7 @@ function createPipelineExecutor(options) {
 }
 /**
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
+ * TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipiline to report
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
@@ -3369,6 +3374,85 @@ TODO: [🧊] This is how it can look in future
  * [ ] One piece can have multiple sources
  */
 
+/**
+ * Intercepts LLM tools and counts total usage of the tools
+ *
+ * @param llmTools LLM tools to be intercepted with usage counting
+ * @returns LLM tools with same functionality with added total cost counting
+ */
+function countTotalUsage(llmTools) {
+    var _this = this;
+    var totalUsage = ZERO_USAGE;
+    var proxyTools = {
+        get title() {
+            // TODO: [🧠] Maybe put here some suffix
+            return llmTools.title;
+        },
+        get description() {
+            // TODO: [🧠] Maybe put here some suffix
+            return llmTools.description;
+        },
+        listModels: function () {
+            return /* not await */ llmTools.listModels();
+        },
+        get totalUsage() {
+            return totalUsage;
+        },
+    };
+    if (llmTools.callChatModel !== undefined) {
+        proxyTools.callChatModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+            var promptResult;
+            return __generator(this, function (_a) {
+                switch (_a.label) {
+                    case 0: return [4 /*yield*/, llmTools.callChatModel(prompt)];
+                    case 1:
+                        promptResult = _a.sent();
+                        totalUsage = addUsage(totalUsage, promptResult.usage);
+                        return [2 /*return*/, promptResult];
+                }
+            });
+        }); };
+    }
+    if (llmTools.callCompletionModel !== undefined) {
+        proxyTools.callCompletionModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+            var promptResult;
+            return __generator(this, function (_a) {
+                switch (_a.label) {
+                    case 0: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
+                    case 1:
+                        promptResult = _a.sent();
+                        totalUsage = addUsage(totalUsage, promptResult.usage);
+                        return [2 /*return*/, promptResult];
+                }
+            });
+        }); };
+    }
+    if (llmTools.callEmbeddingModel !== undefined) {
+        proxyTools.callEmbeddingModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
+            var promptResult;
+            return __generator(this, function (_a) {
+                switch (_a.label) {
+                    case 0: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
+                    case 1:
+                        promptResult = _a.sent();
+                        totalUsage = addUsage(totalUsage, promptResult.usage);
+                        return [2 /*return*/, promptResult];
+                }
+            });
+        }); };
+    }
+    // <- Note: [🤖]
+    return proxyTools;
+}
+/**
+ * TODO: [🔼] !!! Export via `@promptbookcore/`
+ * TODO: [🧠][💸] Maybe make some common abstraction `interceptLlmTools` and use here (or use javascript Proxy?)
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * TODO: [🧠][🌯] Maybe a way how to hide ability to `get totalUsage`
+ * > const [llmToolsWithUsage,getUsage] = countTotalUsage(llmTools);
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ */
+
 /**
  * Prepares the persona for the pipeline
  *
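
The `__awaiter`/`__generator` output above is hard to read, so here is the same interception logic de-sugared into modern TypeScript. The stand-in types are assumptions made for the sketch; `ZERO_USAGE` and `addUsage` are the helpers the bundle actually calls.

```ts
// Stand-in types for the sketch (assumptions, not the published typings).
interface Usage { price: { value: number } /* ...plus the token/char/word counters seen in ZERO_USAGE */ }
interface PromptResult { usage: Usage }
interface LlmTools {
    title: string;
    description: string;
    listModels(): Promise<unknown>;
    callChatModel?(prompt: unknown): Promise<PromptResult>;
    callCompletionModel?(prompt: unknown): Promise<PromptResult>;
    callEmbeddingModel?(prompt: unknown): Promise<PromptResult>;
}
declare const ZERO_USAGE: Usage;
declare function addUsage(a: Usage, b: Usage): Usage;

type LlmToolsWithTotalUsage = LlmTools & { readonly totalUsage: Usage };

function countTotalUsage(llmTools: LlmTools): LlmToolsWithTotalUsage {
    let totalUsage = ZERO_USAGE;

    const proxyTools: LlmToolsWithTotalUsage = {
        get title() { return llmTools.title; },
        get description() { return llmTools.description; },
        listModels: () => /* not await */ llmTools.listModels(),
        get totalUsage() { return totalUsage; }, // <- the one member added on top of the wrapped tools
    };

    // Each call* method is wrapped only if the underlying tools implement it;
    // the wrapper forwards the prompt unchanged and accumulates the reported usage.
    if (llmTools.callChatModel !== undefined) {
        proxyTools.callChatModel = async (prompt) => {
            const promptResult = await llmTools.callChatModel!(prompt);
            totalUsage = addUsage(totalUsage, promptResult.usage);
            return promptResult;
        };
    }
    // ...callCompletionModel and callEmbeddingModel are wrapped identically.

    return proxyTools;
}
```

The `[🌯]` TODO hints at an alternative API, `const [llmToolsWithUsage, getUsage] = countTotalUsage(llmTools)`, that would keep the accumulator off the proxied tools; the shipped version exposes it as the `totalUsage` getter instead.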
@@ -3492,18 +3576,19 @@ function prepareTemplates(pipeline, options) {
  */
 function preparePipeline(pipeline, options) {
     return __awaiter(this, void 0, void 0, function () {
-        var _a, maxParallelCount, parameters, promptTemplates,
+        var llmTools, _a, maxParallelCount, _b, isVerbose, parameters, promptTemplates,
         /*
         <- TODO: [🧠][0] `promptbookVersion` */
         knowledgeSources /*
         <- TODO: [🧊] `knowledgePieces` */, personas /*
-        <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
+        <- TODO: [🧊] `preparations` */, llmToolsWithUsage, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
         var _this = this;
-        return __generator(this, function (_b) {
-            switch (_b.label) {
+        return __generator(this, function (_c) {
+            switch (_c.label) {
                 case 0:
-                    _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
+                    llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
                     parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+                    llmToolsWithUsage = countTotalUsage(llmTools);
                     currentPreparation = {
                         id: 1,
                         // TODO: [🍥]> date: $currentDate(),
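
The new destructuring in `case 0` pins down the options contract: `preparePipeline` now expects `llmTools`, an optional `maxParallelCount` (defaulting to `MAX_PARALLEL_COUNT`) and an optional `isVerbose` (defaulting to `false`), and immediately wraps the tools with `countTotalUsage`. A hedged sketch of the resulting call shape; the option names come straight from the diff, while the signature as a whole is an approximation:

```ts
// Approximate call shape implied by the destructuring above
// (not the published signature; PipelineJson and LlmTools are the sketches from earlier).
declare function preparePipeline(
    pipeline: PipelineJson,
    options: {
        llmTools: LlmTools;        // <- wrapped internally via countTotalUsage
        maxParallelCount?: number; // <- defaults to MAX_PARALLEL_COUNT
        isVerbose?: boolean;       // <- defaults to false
    },
): Promise<PipelineJson>;

declare const pipeline: PipelineJson;
declare const llmTools: LlmTools;

const preparedPipeline = await preparePipeline(pipeline, { llmTools, isVerbose: true });
```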
@@ -3520,7 +3605,11 @@ function preparePipeline(pipeline, options) {
                         var modelRequirements, preparedPersona;
                         return __generator(this, function (_a) {
                             switch (_a.label) {
-                                case 0: return [4 /*yield*/, preparePersona(persona.description, options)];
+                                case 0: return [4 /*yield*/, preparePersona(persona.description, {
+                                        llmTools: llmToolsWithUsage,
+                                        maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
+                                        isVerbose: isVerbose,
+                                    })];
                                 case 1:
                                     modelRequirements = _a.sent();
                                     preparedPersona = __assign(__assign({}, persona), { modelRequirements: modelRequirements, preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] });
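
All three prepare subroutines (`preparePersona` here, and `prepareKnowledgePieces` and `prepareTemplates` in the next hunk) now receive the same explicit options object in place of the raw `options` argument. A sketch of that shared contract; the interface name is hypothetical, while the fields are exactly those in the diff:

```ts
// Hypothetical name for the options shape now passed to every prepare subroutine.
interface PrepareSubroutineOptions {
    llmTools: LlmTools;       // <- the countTotalUsage proxy, not the raw tools
    maxParallelCount: number; // TODO: [🪂]
    isVerbose: boolean;
}
```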
@@ -3530,20 +3619,30 @@ function preparePipeline(pipeline, options) {
                         });
                     }); })];
                 case 1:
-                    _b.sent();
+                    _c.sent();
                     knowledgeSourcesPrepared = knowledgeSources.map(function (source) { return (__assign(__assign({}, source), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
-                    return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, options)];
+                    return [4 /*yield*/, prepareKnowledgePieces(knowledgeSources /* <- TODO: [🧊] {knowledgeSources, knowledgePieces} */, {
+                            llmTools: llmToolsWithUsage,
+                            maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
+                            isVerbose: isVerbose,
+                        })];
                 case 2:
-                    partialknowledgePiecesPrepared = _b.sent();
+                    partialknowledgePiecesPrepared = _c.sent();
                     knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
                     return [4 /*yield*/, prepareTemplates({
                         parameters: parameters,
                         promptTemplates: promptTemplates,
                         knowledgePiecesCount: knowledgePiecesPrepared.length,
-                    }, options)];
+                    }, {
+                        llmTools: llmToolsWithUsage,
+                        maxParallelCount: maxParallelCount /* <- TODO: [🪂] */,
+                        isVerbose: isVerbose,
+                    })];
                 case 3:
-                    promptTemplatesPrepared = (_b.sent()).promptTemplatesPrepared;
+                    promptTemplatesPrepared = (_c.sent()).promptTemplatesPrepared;
                     // ----- /Templates preparation -----
+                    // Note: Count total usage
+                    currentPreparation.modelUsage = llmToolsWithUsage.totalUsage;
                     return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
                 }
             });
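
The two lines added before the final `return` are the payoff of the refactor: the usage accumulated by the shared `countTotalUsage` proxy is written onto the preparation record, so a prepared pipeline now carries the cost of its own preparation. Continuing the earlier sketch, reading it back might look like this (field names per the diff; the new `usageToHuman` util listed in the typings above presumably formats such a record, but its signature is not shown in this diff):

```ts
// (continuing the preparePipeline sketch above)
const prepared = await preparePipeline(pipeline, { llmTools });

// currentPreparation (id: 1) ends up in `preparations`, now carrying
// the aggregated model usage of the whole preparation run.
const { modelUsage } = prepared.preparations[0];
console.info('Preparation usage:', modelUsage);
```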
@@ -3554,7 +3653,6 @@ function preparePipeline(pipeline, options) {
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
- * TODO: [🎐] !!!!! Use here countTotalUsage
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
  */
 
@@ -6977,6 +7075,7 @@ function createLlmToolsFromEnv(options) {
  * TODO: [🧠] Is there some meaningfull way how to test this util
  * TODO: [🧠] Maybe pass env as argument
  * Note: [🟢] This code should never be published outside of `@promptbook/node`
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  */
 
 export { PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromEnv };
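
The export line above confirms the public surface of this build: `PROMPTBOOK_VERSION`, `createCollectionFromDirectory`, and `createLlmToolsFromEnv`. An end-to-end sketch tying them to the new usage counting; since the `[🔼] !!! Export via` TODO says `countTotalUsage` is not re-exported yet, it is declared here rather than imported, and the optionality of the functions' arguments is an assumption:

```ts
import { createCollectionFromDirectory, createLlmToolsFromEnv, PROMPTBOOK_VERSION } from '@promptbook/node';

// Not exported from the package in this build (see the [🔼] TODO above):
declare function countTotalUsage<TTools>(llmTools: TTools): TTools & { readonly totalUsage: unknown };

console.info(`Promptbook ${PROMPTBOOK_VERSION}`); // <- '0.61.0' per the constant above

const llmTools = createLlmToolsFromEnv();            // reads provider API keys from process.env
const llmToolsWithUsage = countTotalUsage(llmTools); // the interceptor added in this diff

const collection = await createCollectionFromDirectory('./promptbook-collection');
// ...prepare and execute pipelines with llmToolsWithUsage,
// then inspect llmToolsWithUsage.totalUsage for the aggregate.
```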