@promptbook/node 0.63.4 → 0.64.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. package/esm/index.es.js +460 -94
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/promptbook-collection/index.d.ts +9 -171
  4. package/esm/typings/src/_packages/node.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +5 -2
  6. package/esm/typings/src/commands/KNOWLEDGE/KnowledgeCommand.d.ts +2 -2
  7. package/esm/typings/src/llm-providers/_common/LlmConfiguration.d.ts +28 -0
  8. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +22 -0
  9. package/esm/typings/src/llm-providers/_common/config.d.ts +15 -0
  10. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfiguration.d.ts +32 -0
  11. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +23 -0
  12. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +7 -22
  13. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
  15. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +1 -0
  17. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  19. package/esm/typings/src/prepare/preparePipeline.d.ts +1 -0
  20. package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +8 -2
  21. package/esm/typings/src/types/typeAliases.d.ts +2 -2
  22. package/esm/typings/src/utils/organization/TODO_string.d.ts +6 -0
  23. package/package.json +3 -2
  24. package/umd/index.umd.js +465 -97
  25. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -4,18 +4,19 @@ import { join as join$1, dirname } from 'path';
4
4
  import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
5
5
  import { format } from 'prettier';
6
6
  import parserHtml from 'prettier/parser-html';
7
+ import hexEncoder from 'crypto-js/enc-hex';
8
+ import sha256 from 'crypto-js/sha256';
7
9
  import { join } from 'path/posix';
8
- import * as dotenv from 'dotenv';
9
10
  import Anthropic from '@anthropic-ai/sdk';
11
+ import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
10
12
  import OpenAI from 'openai';
11
- import hexEncoder from 'crypto-js/enc-hex';
12
- import sha256 from 'crypto-js/sha256';
13
+ import * as dotenv from 'dotenv';
13
14
 
14
15
  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
15
16
  /**
16
17
  * The version of the Promptbook library
17
18
  */
18
- var PROMPTBOOK_VERSION = '0.63.3';
19
+ var PROMPTBOOK_VERSION = '0.63.4';
19
20
  // TODO: !!!! List here all the versions and annotate + put into script
20
21
 
21
22
  /*! *****************************************************************************
@@ -390,7 +391,7 @@ function pipelineJsonToString(pipelineJson) {
390
391
  else if (blockType === 'PROMPT_DIALOG') {
391
392
  commands_1.push("PROMPT DIALOG");
392
393
  // Note: Nothing special here
393
- } // <- }else if([🩻]
394
+ } // <- }else if([🅱]
394
395
  if (jokers) {
395
396
  try {
396
397
  for (var jokers_1 = (e_4 = void 0, __values(jokers)), jokers_1_1 = jokers_1.next(); !jokers_1_1.done; jokers_1_1 = jokers_1.next()) {
@@ -690,7 +691,7 @@ function forEachAsync(array, options, callbackfunction) {
690
691
  });
691
692
  }
692
693
 
693
- var PipelineCollection = [{pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md",title:"Prepare Knowledge from Markdown",promptbookVersion:"0.63.3",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.3",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]},{pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md",title:"Prepare Keywords",promptbookVersion:"0.63.3",parameters:[{name:"knowledgePieceContent",description:"The 
content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.3",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]},{pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md",title:"Prepare Title",promptbookVersion:"0.63.3",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.3",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]},{pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",sourceFile:"./promptbook-collection/prepare-persona.ptbk.md",title:"Prepare Keywords",promptbookVersion:"0.63.3",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.3",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]}];
694
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": 
\"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
694
695
 
695
696
  /**
696
697
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1257,7 +1258,7 @@ var SimplePipelineCollection = /** @class */ (function () {
1257
1258
  pipelineJsonToString(unpreparePipeline(pipeline)) !==
1258
1259
  pipelineJsonToString(unpreparePipeline(this.collection.get(pipeline.pipelineUrl)))) {
1259
1260
  var existing = this.collection.get(pipeline.pipelineUrl);
1260
- throw new ReferenceError$1(spaceTrim$1("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
1261
+ throw new ReferenceError$1(spaceTrim$1("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4E\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
1261
1262
  }
1262
1263
  // Note: [🧠] Overwrite existing pipeline with the same URL
1263
1264
  this.collection.set(pipeline.pipelineUrl, pipeline);
@@ -3444,7 +3445,7 @@ function prepareKnowledgePieces(knowledgeSources, options) {
3444
3445
  var partialPieces, pieces;
3445
3446
  return __generator(this, function (_a) {
3446
3447
  switch (_a.label) {
3447
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.source, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
3448
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
3448
3449
  options)];
3449
3450
  case 1:
3450
3451
  partialPieces = _a.sent();
@@ -3634,6 +3635,35 @@ function preparePersona(personaDescription, options) {
3634
3635
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
3635
3636
  */
3636
3637
 
3638
+ /**
3639
+ * @@@
3640
+ *
3641
+ * Note: It is usefull @@@
3642
+ *
3643
+ * @param pipeline
3644
+ * @public exported from `@promptbook/utils`
3645
+ */
3646
+ function clonePipeline(pipeline) {
3647
+ // Note: Not using spread operator (...) because @@@
3648
+ var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, promptbookVersion = pipeline.promptbookVersion, description = pipeline.description, parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
3649
+ return {
3650
+ pipelineUrl: pipelineUrl,
3651
+ sourceFile: sourceFile,
3652
+ title: title,
3653
+ promptbookVersion: promptbookVersion,
3654
+ description: description,
3655
+ parameters: parameters,
3656
+ promptTemplates: promptTemplates,
3657
+ knowledgeSources: knowledgeSources,
3658
+ knowledgePieces: knowledgePieces,
3659
+ personas: personas,
3660
+ preparations: preparations,
3661
+ };
3662
+ }
3663
+ /**
3664
+ * TODO: [🍙] Make some standart order of json properties
3665
+ */
3666
+
3637
3667
  /**
3638
3668
  * @@@
3639
3669
  *
@@ -3686,40 +3716,12 @@ function prepareTemplates(pipeline, options) {
3686
3716
  * TODO: [🧠][🥜]
3687
3717
  */
3688
3718
 
3689
- /**
3690
- * @@@
3691
- *
3692
- * Note: It is usefull @@@
3693
- *
3694
- * @param pipeline
3695
- * @public exported from `@promptbook/utils`
3696
- */
3697
- function clonePipeline(pipeline) {
3698
- // Note: Not using spread operator (...) because @@@
3699
- var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, promptbookVersion = pipeline.promptbookVersion, description = pipeline.description, parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
3700
- return {
3701
- pipelineUrl: pipelineUrl,
3702
- sourceFile: sourceFile,
3703
- title: title,
3704
- promptbookVersion: promptbookVersion,
3705
- description: description,
3706
- parameters: parameters,
3707
- promptTemplates: promptTemplates,
3708
- knowledgeSources: knowledgeSources,
3709
- knowledgePieces: knowledgePieces,
3710
- personas: personas,
3711
- preparations: preparations,
3712
- };
3713
- }
3714
- /**
3715
- * TODO: [🍙] Make some standart order of json properties
3716
- */
3717
-
3718
3719
  /**
3719
3720
  * Prepare pipeline from string (markdown) format to JSON format
3720
3721
  *
3721
3722
  * Note: This function does not validate logic of the pipeline
3722
3723
  * Note: This function acts as part of compilation process
3724
+ * Note: When the pipeline is already prepared, it returns the same pipeline
3723
3725
  * @public exported from `@promptbook/core`
3724
3726
  */
3725
3727
  function preparePipeline(pipeline, options) {
@@ -3734,6 +3736,9 @@ function preparePipeline(pipeline, options) {
3734
3736
  return __generator(this, function (_c) {
3735
3737
  switch (_c.label) {
3736
3738
  case 0:
3739
+ if (isPipelinePrepared(pipeline)) {
3740
+ return [2 /*return*/, pipeline];
3741
+ }
3737
3742
  llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
3738
3743
  parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
3739
3744
  llmToolsWithUsage = countTotalUsage(llmTools);
@@ -3867,34 +3872,37 @@ var knowledgeCommandParser = {
3867
3872
  */
3868
3873
  parse: function (input) {
3869
3874
  var args = input.args;
3870
- var source = args[0];
3871
- if (source === undefined) {
3875
+ var sourceContent = spaceTrim(args[0] || '');
3876
+ if (sourceContent === '') {
3872
3877
  throw new ParsingError("Source is not defined");
3873
3878
  }
3874
- if (source.startsWith('http://')) {
3879
+ // TODO: !!!! Following checks should be applied every link in the `sourceContent`
3880
+ if (sourceContent.startsWith('http://')) {
3875
3881
  throw new ParsingError("Source is not secure");
3876
3882
  }
3877
- if (!(isValidFilePath(source) || isValidUrl(source))) {
3883
+ if (!(isValidFilePath(sourceContent) || isValidUrl(sourceContent))) {
3878
3884
  throw new ParsingError("Source not valid");
3879
3885
  }
3880
- if (source.startsWith('../') || source.startsWith('/') || /^[A-Z]:[\\/]+/i.test(source)) {
3886
+ if (sourceContent.startsWith('../') || sourceContent.startsWith('/') || /^[A-Z]:[\\/]+/i.test(sourceContent)) {
3881
3887
  throw new ParsingError("Source cannot be outside of the .ptbk.md folder");
3882
3888
  }
3883
3889
  return {
3884
3890
  type: 'KNOWLEDGE',
3885
- source: source,
3891
+ sourceContent: sourceContent,
3886
3892
  };
3887
3893
  },
3888
3894
  /**
3889
3895
  * Note: Prototype of [🍧] (remove this comment after full implementation)
3890
3896
  */
3891
3897
  applyToPipelineJson: function (personaCommand, subjects) {
3892
- var source = personaCommand.source;
3898
+ var sourceContent = personaCommand.sourceContent;
3893
3899
  var pipelineJson = subjects.pipelineJson;
3894
- var name = titleToName(source);
3900
+ var name = 'source-' + sha256(hexEncoder.parse(JSON.stringify(sourceContent))).toString( /* hex */);
3901
+ // <- TODO: [🥬] Encapsulate sha256 to some private utility function
3902
+ // <- TODO: This should be replaced with a better name later in preparation (done with some propper LLM summarization)
3895
3903
  pipelineJson.knowledgeSources.push({
3896
3904
  name: name,
3897
- source: source,
3905
+ sourceContent: sourceContent,
3898
3906
  });
3899
3907
  },
3900
3908
  };
@@ -4087,7 +4095,7 @@ var BlockTypes = [
4087
4095
  'KNOWLEDGE',
4088
4096
  'INSTRUMENT',
4089
4097
  'ACTION',
4090
- // <- [🩻]
4098
+ // <- [🅱]
4091
4099
  ];
4092
4100
 
4093
4101
  /**
@@ -4114,7 +4122,7 @@ var blockCommandParser = {
4114
4122
  'KNOWLEDGE',
4115
4123
  'INSTRUMENT',
4116
4124
  'ACTION',
4117
- // <- [🩻]
4125
+ // <- [🅱]
4118
4126
  ],
4119
4127
  /**
4120
4128
  * Aliases for the BLOCK command
@@ -5580,7 +5588,7 @@ function pipelineStringToJsonSync(pipelineString) {
5580
5588
  if (command.blockType === 'KNOWLEDGE') {
5581
5589
  knowledgeCommandParser.applyToPipelineJson({
5582
5590
  type: 'KNOWLEDGE',
5583
- source: content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
5591
+ sourceContent: content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
5584
5592
  }, {
5585
5593
  pipelineJson: pipelineJson,
5586
5594
  templateJson: templateJson,
@@ -6191,7 +6199,7 @@ function createCollectionFromDirectory(path, options) {
6191
6199
  }
6192
6200
  else {
6193
6201
  existing = collection.get(pipeline.pipelineUrl);
6194
- throw new ReferenceError(spaceTrim("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
6202
+ throw new ReferenceError(spaceTrim("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4F\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
6195
6203
  }
6196
6204
  }
6197
6205
  }
@@ -6257,22 +6265,6 @@ function createCollectionFromDirectory(path, options) {
6257
6265
  * TODO: [🖇] What about symlinks? Maybe option isSymlinksFollowed
6258
6266
  */
6259
6267
 
6260
- /**
6261
- * This error type indicates that you try to use a feature that is not available in the current environment
6262
- *
6263
- * @public exported from `@promptbook/core`
6264
- */
6265
- var EnvironmentMismatchError = /** @class */ (function (_super) {
6266
- __extends(EnvironmentMismatchError, _super);
6267
- function EnvironmentMismatchError(message) {
6268
- var _this = _super.call(this, message) || this;
6269
- _this.name = 'EnvironmentMismatchError';
6270
- Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
6271
- return _this;
6272
- }
6273
- return EnvironmentMismatchError;
6274
- }(Error));
6275
-
6276
6268
  /**
6277
6269
  * Helper of usage compute
6278
6270
  *
@@ -6618,6 +6610,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6618
6610
  * TODO: Maybe make custom OpenaiError
6619
6611
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
6620
6612
  * TODO: [🍜] Auto use anonymous server in browser
6613
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
6621
6614
  */
6622
6615
 
6623
6616
  /**
@@ -6971,6 +6964,255 @@ var OPENAI_MODELS = [
6971
6964
  * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
6972
6965
  */
6973
6966
 
6967
/**
 * Execution Tools for calling Azure OpenAI API.
 *
 * @public exported from `@promptbook/azure-openai`
 */
var AzureOpenAiExecutionTools = /** @class */ (function () {
    /**
     * Creates Azure OpenAI Execution Tools.
     *
     * @param options which are relevant are directly passed to the OpenAI client
     *                (`resourceName` builds the endpoint URL, `apiKey` the credential)
     */
    function AzureOpenAiExecutionTools(options) {
        this.options = options;
        this.client = new OpenAIClient("https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
    }
    Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
        get: function () {
            return 'Azure OpenAI';
        },
        enumerable: false,
        configurable: true
    });
    Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
        get: function () {
            return 'Use all models trained by OpenAI provided by Azure';
        },
        enumerable: false,
        configurable: true
    });
    /**
     * Calls Azure OpenAI API to use a chat model.
     *
     * @param prompt prompt with `content`, `parameters` and `modelRequirements` (must be CHAT variant)
     */
    AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
        var _a, _b;
        return __awaiter(this, void 0, void 0, function () {
            var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
            var _c;
            return __generator(this, function (_d) {
                switch (_d.label) {
                    case 0:
                        if (this.options.isVerbose) {
                            console.info('💬 Azure OpenAI callChatModel call');
                        }
                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
                        // TODO: [☂] Use here more modelRequirements
                        if (modelRequirements.modelVariant !== 'CHAT') {
                            throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
                        }
                        _d.label = 1;
                    case 1:
                        _d.trys.push([1, 3, , 4]);
                        // Note: Azure deployments are addressed by deployment name, which doubles as model name here
                        modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
                        modelSettings = {
                            maxTokens: modelRequirements.maxTokens,
                            // <- TODO: [🌾] Make some global max cap for maxTokens
                            temperature: modelRequirements.temperature,
                            user: this.options.user,
                            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
                            // <- Note: [🧆]
                        };
                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
                        // Note: Optional system message first, then the user prompt
                        messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
                            ? []
                            : [
                                {
                                    role: 'system',
                                    content: modelRequirements.systemMessage,
                                },
                            ])), false), [
                            {
                                role: 'user',
                                content: rawPromptContent,
                            },
                        ], false);
                        start = getCurrentIsoDate();
                        complete = void 0;
                        if (this.options.isVerbose) {
                            console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
                        }
                        rawRequest = [modelName, messages, modelSettings];
                        return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
                    case 2:
                        rawResponse = _d.sent();
                        if (this.options.isVerbose) {
                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
                        }
                        if (!rawResponse.choices[0]) {
                            throw new PipelineExecutionError('No choices from Azure OpenAI');
                        }
                        if (rawResponse.choices.length > 1) {
                            // TODO: This should be maybe only warning
                            throw new PipelineExecutionError('More than one choice from Azure OpenAI');
                        }
                        if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
                            throw new PipelineExecutionError('Empty response from Azure OpenAI');
                        }
                        resultContent = rawResponse.choices[0].message.content;
                        // eslint-disable-next-line prefer-const
                        complete = getCurrentIsoDate();
                        usage = {
                            price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
                            input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
                            // Note: Output counts are computed from the model result, not from the prompt
                            output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
                        };
                        return [2 /*return*/, {
                                content: resultContent,
                                modelName: modelName,
                                timing: {
                                    start: start,
                                    complete: complete,
                                },
                                usage: usage,
                                rawPromptContent: rawPromptContent,
                                rawRequest: rawRequest,
                                rawResponse: rawResponse,
                                // <- [🗯]
                            }];
                    case 3:
                        error_1 = _d.sent();
                        throw this.transformAzureError(error_1);
                    case 4: return [2 /*return*/];
                }
            });
        });
    };
    /**
     * Calls Azure OpenAI API to use a completion model.
     *
     * @param prompt prompt with `content`, `parameters` and `modelRequirements` (must be COMPLETION variant)
     */
    AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
        var _a, _b;
        return __awaiter(this, void 0, void 0, function () {
            var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
            var _c;
            return __generator(this, function (_d) {
                switch (_d.label) {
                    case 0:
                        if (this.options.isVerbose) {
                            console.info('🖋 Azure OpenAI callCompletionModel call');
                        }
                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
                        // TODO: [☂] Use here more modelRequirements
                        if (modelRequirements.modelVariant !== 'COMPLETION') {
                            throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
                        }
                        _d.label = 1;
                    case 1:
                        _d.trys.push([1, 3, , 4]);
                        modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
                        modelSettings = {
                            maxTokens: modelRequirements.maxTokens || 2000,
                            // <- TODO: [🌾] Make some global max cap for maxTokens
                            temperature: modelRequirements.temperature,
                            user: this.options.user,
                            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
                            // <- Note: [🧆]
                        };
                        start = getCurrentIsoDate();
                        complete = void 0;
                        if (this.options.isVerbose) {
                            console.info(colors.bgWhite('content'), JSON.stringify(content, null, 4));
                            console.info(colors.bgWhite('parameters'), JSON.stringify(parameters, null, 4));
                        }
                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
                        rawRequest = [
                            modelName,
                            [rawPromptContent],
                            modelSettings,
                        ];
                        return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
                    case 2:
                        rawResponse = _d.sent();
                        if (this.options.isVerbose) {
                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
                        }
                        if (!rawResponse.choices[0]) {
                            throw new PipelineExecutionError('No choices from Azure OpenAI');
                        }
                        if (rawResponse.choices.length > 1) {
                            // TODO: This should be maybe only warning
                            throw new PipelineExecutionError('More than one choice from Azure OpenAI');
                        }
                        resultContent = rawResponse.choices[0].text;
                        // eslint-disable-next-line prefer-const
                        complete = getCurrentIsoDate();
                        usage = {
                            price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
                            input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
                            // Note: Output counts are computed from the model result, not from the prompt
                            output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(resultContent)),
                        };
                        return [2 /*return*/, {
                                content: resultContent,
                                modelName: modelName,
                                timing: {
                                    start: start,
                                    complete: complete,
                                },
                                usage: usage,
                                rawPromptContent: rawPromptContent,
                                rawRequest: rawRequest,
                                rawResponse: rawResponse,
                                // <- [🗯]
                            }];
                    case 3:
                        error_2 = _d.sent();
                        throw this.transformAzureError(error_2);
                    case 4: return [2 /*return*/];
                }
            });
        });
    };
    // <- Note: [🤖] callXxxModel
    /**
     * Changes Azure error (which is not a proper Error but a plain object) to a proper Error
     */
    AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
        if (typeof azureError !== 'object' || azureError === null) {
            return new PipelineExecutionError("Unknown Azure OpenAI error");
        }
        var code = azureError.code, message = azureError.message;
        return new PipelineExecutionError("".concat(code, ": ").concat(message));
    };
    /**
     * List all available Azure OpenAI models that can be used
     */
    AzureOpenAiExecutionTools.prototype.listModels = function () {
        return __awaiter(this, void 0, void 0, function () {
            return __generator(this, function (_a) {
                // TODO: !!! Do here some filtering which models are really available as deployment
                // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
                return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
                        var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
                        return ({
                            modelTitle: "Azure ".concat(modelTitle),
                            modelName: modelName,
                            modelVariant: modelVariant,
                        });
                    })];
            });
        });
    };
    return AzureOpenAiExecutionTools;
}());
/**
 * TODO: Maybe Create some common util for callChatModel and callCompletionModel
 * TODO: Maybe make custom AzureOpenaiError
 * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
 * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
 */
7215
+
6974
7216
  /**
6975
7217
  * Computes the usage of the OpenAI API based on the response from OpenAI
6976
7218
  *
@@ -7307,12 +7549,104 @@ var OpenAiExecutionTools = /** @class */ (function () {
7307
7549
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
7308
7550
  * TODO: Maybe make custom OpenaiError
7309
7551
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
7552
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
7553
+ */
7554
+
7555
+ /**
7556
+ * @public exported from `@promptbook/node`
7557
+ */
7558
+ var LLM_CONFIGURATION_BOILERPLATES = [
7559
+ {
7560
+ title: 'Open AI',
7561
+ packageName: '@promptbook/openai',
7562
+ className: 'OpenAiExecutionTools',
7563
+ options: {
7564
+ apiKey: 'sk-',
7565
+ },
7566
+ },
7567
+ {
7568
+ title: 'Anthropic Claude',
7569
+ packageName: '@promptbook/anthropic-claude',
7570
+ className: 'AnthropicClaudeExecutionTools',
7571
+ options: {
7572
+ apiKey: 'sk-ant-api03-',
7573
+ },
7574
+ },
7575
+ {
7576
+ title: 'Azure Open AI',
7577
+ packageName: '@promptbook/azure-openai',
7578
+ className: 'AzureOpenAiExecutionTools',
7579
+ options: {
7580
+ // TODO: !!!> resourceName
7581
+ // TODO: !!!> deploymentName
7582
+ apiKey: 'sk-',
7583
+ },
7584
+ },
7585
+ // <- Note: [🦑] Add here new LLM provider
7586
+ ];
7587
/**
 * Factory map used by `createLlmToolsFromConfiguration` to instantiate execution
 * tools by class name (key pattern: `'get' + className`).
 *
 * @private internal type for `createLlmToolsFromConfiguration`
 */
var EXECUTION_TOOLS_CLASSES = {
    getOpenAiExecutionTools: function (llmOptions) {
        var openAiOptions = __assign(__assign({}, llmOptions), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ });
        return new OpenAiExecutionTools(openAiOptions);
    },
    getAnthropicClaudeExecutionTools: function (llmOptions) {
        return new AnthropicClaudeExecutionTools(llmOptions);
    },
    getAzureOpenAiExecutionTools: function (llmOptions) {
        return new AzureOpenAiExecutionTools(llmOptions);
    },
    // <- Note: [🦑] Add here new LLM provider
};
/**
 * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
 * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
 */
7602
+
7603
/**
 * This error type indicates that you try to use a feature that is not available in the current environment
 *
 * @public exported from `@promptbook/core`
 */
var EnvironmentMismatchError = /** @class */ (function (_super) {
    __extends(EnvironmentMismatchError, _super);
    /**
     * @param message human-readable description of the environment mismatch
     */
    function EnvironmentMismatchError(message) {
        var _this = _super.call(this, message) || this;
        _this.name = 'EnvironmentMismatchError';
        // Note: Restores the prototype chain that the transpiled (ES5-target) `extends Error` breaks,
        //       so `instanceof EnvironmentMismatchError` works on the thrown instance
        Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
        return _this;
    }
    return EnvironmentMismatchError;
}(Error));
7311
7618
 
7312
7619
/**
 * @@@
 *
 * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
 *
 * @param configuration list of provider configurations (see `LLM_CONFIGURATION_BOILERPLATES` for the shape)
 * @param options currently only `isVerbose` (defaults to `false`), forwarded to every created tool
 * @returns @@@
 * @public exported from `@promptbook/node`
 */
function createLlmToolsFromConfiguration(configuration, options) {
    if (options === void 0) { options = {}; }
    if (!isRunningInNode()) {
        throw new EnvironmentMismatchError('Function `createLlmToolsFromConfiguration` works only in Node.js environment');
    }
    var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
    dotenv.config();
    // Note: Each configuration entry is resolved to its factory in `EXECUTION_TOOLS_CLASSES` by class name
    var llmTools = configuration.map(function (llmConfiguration) {
        return EXECUTION_TOOLS_CLASSES["get".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
    });
    return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
}
/**
 * TODO: [🧠][🎌] Dynamically install required providers
 * TODO: @@@ write discussion about this - wizard
 * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
 * TODO: [🧠] Is there some meaningful way how to test this util
 * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
 * TODO: This should be maybe not under `_common` but under `utils`
 */
7647
+
7648
+ /**
7649
+ * @@@
7316
7650
  *
7317
7651
  * @@@ .env
7318
7652
  *
@@ -7323,46 +7657,77 @@ var OpenAiExecutionTools = /** @class */ (function () {
7323
7657
  * @returns @@@
7324
7658
  * @public exported from `@promptbook/node`
7325
7659
  */
7326
function createLlmToolsFromConfigurationFromEnv() {
    if (!isRunningInNode()) {
        throw new EnvironmentMismatchError('Function `createLlmToolsFromConfigurationFromEnv` works only in Node.js environment');
    }
    // Note: One configuration entry is collected per recognized environment variable
    var llmToolsConfiguration = [];
    if (typeof process.env.OPENAI_API_KEY === 'string') {
        llmToolsConfiguration.push({
            title: 'OpenAI (from env)',
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools',
            options: {
                apiKey: process.env.OPENAI_API_KEY,
            },
        });
    }
    if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
        llmToolsConfiguration.push({
            title: 'Claude (from env)',
            packageName: '@promptbook/anthropic-claude',
            className: 'AnthropicClaudeExecutionTools',
            options: {
                apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
            },
        });
    }
    // <- Note: [🦑] Add here new LLM provider
    return llmToolsConfiguration;
}
/**
 * TODO: Add Azure OpenAI
 * TODO: [🧠][🍛]
 * TODO: [🧠] Is there some meaningful way how to test this util
 * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
 * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
 * TODO: This should be maybe not under `_common` but under `utils`
 * TODO: [🧠] Maybe pass env as argument
 */
7697
+
7698
/**
 * @@@
 *
 * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
 *
 * @@@ .env
 *
 * It looks for environment variables:
 * - `process.env.OPENAI_API_KEY`
 * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
 *
 * @returns @@@
 * @public exported from `@promptbook/node`
 */
function createLlmToolsFromEnv(options) {
    if (options === void 0) { options = {}; }
    if (!isRunningInNode()) {
        throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
    }
    // Note: Gather provider configurations from environment variables, then build the tools from them
    var llmToolsConfiguration = createLlmToolsFromConfigurationFromEnv();
    if (llmToolsConfiguration.length !== 0) {
        return createLlmToolsFromConfiguration(llmToolsConfiguration, options);
    }
    // TODO: [🥃]
    throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
}
/**
 * TODO: @@@ write `createLlmToolsFromEnv` vs `createLlmToolsFromConfigurationFromEnv` vs `createLlmToolsFromConfiguration`
 * TODO: [🧠][🍛] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
 * TODO: [🧠] Is there some meaningful way how to test this util
 * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
 * TODO: [🥃] Allow `ptbk make` without llm tools
 * TODO: This should be maybe not under `_common` but under `utils`
 */
7367
7732
 
7368
7733
  /**
@@ -7416,6 +7781,7 @@ var FilesStorage = /** @class */ (function () {
7416
7781
  FilesStorage.prototype.getFilenameForKey = function (key) {
7417
7782
  var name = titleToName(key);
7418
7783
  var hash = sha256(hexEncoder.parse(name)).toString( /* hex */);
7784
+ // <- TODO: [🥬] Encapsulate sha256 to some private utility function
7419
7785
  return join$1.apply(void 0, __spreadArray(__spreadArray([this.options.cacheFolderPath], __read(nameToSubfolderPath(hash /* <- TODO: [🎎] Maybe add some SHA256 prefix */)), false), ["".concat(name.substring(0, MAX_FILENAME_LENGTH), ".json")], false));
7420
7786
  };
7421
7787
  /**
@@ -7492,5 +7858,5 @@ var FilesStorage = /** @class */ (function () {
7492
7858
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
7493
7859
  */
7494
7860
 
7495
- export { FilesStorage, PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromEnv };
7861
+ export { FilesStorage, LLM_CONFIGURATION_BOILERPLATES, PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromConfiguration, createLlmToolsFromConfigurationFromEnv, createLlmToolsFromEnv };
7496
7862
  //# sourceMappingURL=index.es.js.map