@promptbook/cli 0.63.4 → 0.65.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. package/esm/index.es.js +410 -75
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/promptbook-collection/index.d.ts +9 -171
  4. package/esm/typings/src/_packages/node.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +4 -2
  6. package/esm/typings/src/commands/KNOWLEDGE/KnowledgeCommand.d.ts +2 -2
  7. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +28 -0
  8. package/esm/typings/src/llm-providers/_common/config.d.ts +15 -0
  9. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfiguration.d.ts +32 -0
  10. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +23 -0
  11. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +7 -22
  12. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +1 -0
  13. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
  14. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  15. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +1 -0
  17. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  18. package/esm/typings/src/prepare/preparePipeline.d.ts +1 -0
  19. package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +8 -2
  20. package/esm/typings/src/types/typeAliases.d.ts +2 -2
  21. package/esm/typings/src/utils/organization/TODO_string.d.ts +6 -0
  22. package/package.json +2 -1
  23. package/umd/index.umd.js +412 -78
  24. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -6,11 +6,12 @@ import { stat, access, constants, readdir, readFile, writeFile, mkdir, unlink }
6
6
  import { join as join$1, dirname } from 'path';
7
7
  import { format } from 'prettier';
8
8
  import parserHtml from 'prettier/parser-html';
9
- import { join } from 'path/posix';
10
9
  import hexEncoder from 'crypto-js/enc-hex';
11
10
  import sha256 from 'crypto-js/sha256';
11
+ import { join } from 'path/posix';
12
12
  import * as dotenv from 'dotenv';
13
13
  import Anthropic from '@anthropic-ai/sdk';
14
+ import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
14
15
  import OpenAI from 'openai';
15
16
  import glob from 'glob-promise';
16
17
 
@@ -18,7 +19,7 @@ import glob from 'glob-promise';
18
19
  /**
19
20
  * The version of the Promptbook library
20
21
  */
21
- var PROMPTBOOK_VERSION = '0.63.3';
22
+ var PROMPTBOOK_VERSION = '0.64.0-0';
22
23
  // TODO: !!!! List here all the versions and annotate + put into script
23
24
 
24
25
  /*! *****************************************************************************
@@ -539,7 +540,7 @@ function pipelineJsonToString(pipelineJson) {
539
540
  else if (blockType === 'PROMPT_DIALOG') {
540
541
  commands_1.push("PROMPT DIALOG");
541
542
  // Note: Nothing special here
542
- } // <- }else if([🩻]
543
+ } // <- }else if([🅱]
543
544
  if (jokers) {
544
545
  try {
545
546
  for (var jokers_1 = (e_4 = void 0, __values(jokers)), jokers_1_1 = jokers_1.next(); !jokers_1_1.done; jokers_1_1 = jokers_1.next()) {
@@ -839,7 +840,7 @@ function forEachAsync(array, options, callbackfunction) {
839
840
  });
840
841
  }
841
842
 
842
- var PipelineCollection = [{pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md",title:"Prepare Knowledge from Markdown",promptbookVersion:"0.63.3",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.3",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]},{pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md",title:"Prepare Keywords",promptbookVersion:"0.63.3",parameters:[{name:"knowledgePieceContent",description:"The 
content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.3",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]},{pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md",title:"Prepare Title",promptbookVersion:"0.63.3",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> 
{knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.3",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]},{pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",sourceFile:"./promptbook-collection/prepare-persona.ptbk.md",title:"Prepare Keywords",promptbookVersion:"0.63.3",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. 
The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.3",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]}];
843
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.64.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.64.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.64.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.64.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
843
844
 
844
845
  /**
845
846
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1406,7 +1407,7 @@ var SimplePipelineCollection = /** @class */ (function () {
1406
1407
  pipelineJsonToString(unpreparePipeline(pipeline)) !==
1407
1408
  pipelineJsonToString(unpreparePipeline(this.collection.get(pipeline.pipelineUrl)))) {
1408
1409
  var existing = this.collection.get(pipeline.pipelineUrl);
1409
- throw new ReferenceError$1(spaceTrim$1("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
1410
+ throw new ReferenceError$1(spaceTrim$1("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4E\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
1410
1411
  }
1411
1412
  // Note: [🧠] Overwrite existing pipeline with the same URL
1412
1413
  this.collection.set(pipeline.pipelineUrl, pipeline);
@@ -3593,7 +3594,7 @@ function prepareKnowledgePieces(knowledgeSources, options) {
3593
3594
  var partialPieces, pieces;
3594
3595
  return __generator(this, function (_a) {
3595
3596
  switch (_a.label) {
3596
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.source, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
3597
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
3597
3598
  options)];
3598
3599
  case 1:
3599
3600
  partialPieces = _a.sent();
@@ -3783,6 +3784,35 @@ function preparePersona(personaDescription, options) {
3783
3784
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
3784
3785
  */
3785
3786
 
3787
+ /**
3788
+ * @@@
3789
+ *
3790
+ * Note: It is usefull @@@
3791
+ *
3792
+ * @param pipeline
3793
+ * @public exported from `@promptbook/utils`
3794
+ */
3795
+ function clonePipeline(pipeline) {
3796
+ // Note: Not using spread operator (...) because @@@
3797
+ var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, promptbookVersion = pipeline.promptbookVersion, description = pipeline.description, parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
3798
+ return {
3799
+ pipelineUrl: pipelineUrl,
3800
+ sourceFile: sourceFile,
3801
+ title: title,
3802
+ promptbookVersion: promptbookVersion,
3803
+ description: description,
3804
+ parameters: parameters,
3805
+ promptTemplates: promptTemplates,
3806
+ knowledgeSources: knowledgeSources,
3807
+ knowledgePieces: knowledgePieces,
3808
+ personas: personas,
3809
+ preparations: preparations,
3810
+ };
3811
+ }
3812
+ /**
3813
+ * TODO: [🍙] Make some standart order of json properties
3814
+ */
3815
+
3786
3816
  /**
3787
3817
  * @@@
3788
3818
  *
@@ -3835,40 +3865,12 @@ function prepareTemplates(pipeline, options) {
3835
3865
  * TODO: [🧠][🥜]
3836
3866
  */
3837
3867
 
3838
- /**
3839
- * @@@
3840
- *
3841
- * Note: It is usefull @@@
3842
- *
3843
- * @param pipeline
3844
- * @public exported from `@promptbook/utils`
3845
- */
3846
- function clonePipeline(pipeline) {
3847
- // Note: Not using spread operator (...) because @@@
3848
- var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, promptbookVersion = pipeline.promptbookVersion, description = pipeline.description, parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
3849
- return {
3850
- pipelineUrl: pipelineUrl,
3851
- sourceFile: sourceFile,
3852
- title: title,
3853
- promptbookVersion: promptbookVersion,
3854
- description: description,
3855
- parameters: parameters,
3856
- promptTemplates: promptTemplates,
3857
- knowledgeSources: knowledgeSources,
3858
- knowledgePieces: knowledgePieces,
3859
- personas: personas,
3860
- preparations: preparations,
3861
- };
3862
- }
3863
- /**
3864
- * TODO: [🍙] Make some standart order of json properties
3865
- */
3866
-
3867
3868
  /**
3868
3869
  * Prepare pipeline from string (markdown) format to JSON format
3869
3870
  *
3870
3871
  * Note: This function does not validate logic of the pipeline
3871
3872
  * Note: This function acts as part of compilation process
3873
+ * Note: When the pipeline is already prepared, it returns the same pipeline
3872
3874
  * @public exported from `@promptbook/core`
3873
3875
  */
3874
3876
  function preparePipeline(pipeline, options) {
@@ -3883,6 +3885,9 @@ function preparePipeline(pipeline, options) {
3883
3885
  return __generator(this, function (_c) {
3884
3886
  switch (_c.label) {
3885
3887
  case 0:
3888
+ if (isPipelinePrepared(pipeline)) {
3889
+ return [2 /*return*/, pipeline];
3890
+ }
3886
3891
  llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
3887
3892
  parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
3888
3893
  llmToolsWithUsage = countTotalUsage(llmTools);
@@ -4016,34 +4021,37 @@ var knowledgeCommandParser = {
4016
4021
  */
4017
4022
  parse: function (input) {
4018
4023
  var args = input.args;
4019
- var source = args[0];
4020
- if (source === undefined) {
4024
+ var sourceContent = spaceTrim(args[0] || '');
4025
+ if (sourceContent === '') {
4021
4026
  throw new ParsingError("Source is not defined");
4022
4027
  }
4023
- if (source.startsWith('http://')) {
4028
+ // TODO: !!!! Following checks should be applied every link in the `sourceContent`
4029
+ if (sourceContent.startsWith('http://')) {
4024
4030
  throw new ParsingError("Source is not secure");
4025
4031
  }
4026
- if (!(isValidFilePath(source) || isValidUrl(source))) {
4032
+ if (!(isValidFilePath(sourceContent) || isValidUrl(sourceContent))) {
4027
4033
  throw new ParsingError("Source not valid");
4028
4034
  }
4029
- if (source.startsWith('../') || source.startsWith('/') || /^[A-Z]:[\\/]+/i.test(source)) {
4035
+ if (sourceContent.startsWith('../') || sourceContent.startsWith('/') || /^[A-Z]:[\\/]+/i.test(sourceContent)) {
4030
4036
  throw new ParsingError("Source cannot be outside of the .ptbk.md folder");
4031
4037
  }
4032
4038
  return {
4033
4039
  type: 'KNOWLEDGE',
4034
- source: source,
4040
+ sourceContent: sourceContent,
4035
4041
  };
4036
4042
  },
4037
4043
  /**
4038
4044
  * Note: Prototype of [🍧] (remove this comment after full implementation)
4039
4045
  */
4040
4046
  applyToPipelineJson: function (personaCommand, subjects) {
4041
- var source = personaCommand.source;
4047
+ var sourceContent = personaCommand.sourceContent;
4042
4048
  var pipelineJson = subjects.pipelineJson;
4043
- var name = titleToName(source);
4049
+ var name = 'source-' + sha256(hexEncoder.parse(JSON.stringify(sourceContent))).toString( /* hex */);
4050
+ // <- TODO: [🥬] Encapsulate sha256 to some private utility function
4051
+ // <- TODO: This should be replaced with a better name later in preparation (done with some propper LLM summarization)
4044
4052
  pipelineJson.knowledgeSources.push({
4045
4053
  name: name,
4046
- source: source,
4054
+ sourceContent: sourceContent,
4047
4055
  });
4048
4056
  },
4049
4057
  };
@@ -4236,7 +4244,7 @@ var BlockTypes = [
4236
4244
  'KNOWLEDGE',
4237
4245
  'INSTRUMENT',
4238
4246
  'ACTION',
4239
- // <- [🩻]
4247
+ // <- [🅱]
4240
4248
  ];
4241
4249
 
4242
4250
  /**
@@ -4263,7 +4271,7 @@ var blockCommandParser = {
4263
4271
  'KNOWLEDGE',
4264
4272
  'INSTRUMENT',
4265
4273
  'ACTION',
4266
- // <- [🩻]
4274
+ // <- [🅱]
4267
4275
  ],
4268
4276
  /**
4269
4277
  * Aliases for the BLOCK command
@@ -5729,7 +5737,7 @@ function pipelineStringToJsonSync(pipelineString) {
5729
5737
  if (command.blockType === 'KNOWLEDGE') {
5730
5738
  knowledgeCommandParser.applyToPipelineJson({
5731
5739
  type: 'KNOWLEDGE',
5732
- source: content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
5740
+ sourceContent: content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
5733
5741
  }, {
5734
5742
  pipelineJson: pipelineJson,
5735
5743
  templateJson: templateJson,
@@ -6321,7 +6329,7 @@ function createCollectionFromDirectory(path, options) {
6321
6329
  }
6322
6330
  else {
6323
6331
  existing = collection.get(pipeline.pipelineUrl);
6324
- throw new ReferenceError(spaceTrim("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
6332
+ throw new ReferenceError(spaceTrim("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4F\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
6325
6333
  }
6326
6334
  }
6327
6335
  }
@@ -6483,6 +6491,7 @@ var FilesStorage = /** @class */ (function () {
6483
6491
  FilesStorage.prototype.getFilenameForKey = function (key) {
6484
6492
  var name = titleToName(key);
6485
6493
  var hash = sha256(hexEncoder.parse(name)).toString( /* hex */);
6494
+ // <- TODO: [🥬] Encapsulate sha256 to some private utility function
6486
6495
  return join$1.apply(void 0, __spreadArray(__spreadArray([this.options.cacheFolderPath], __read(nameToSubfolderPath(hash /* <- TODO: [🎎] Maybe add some SHA256 prefix */)), false), ["".concat(name.substring(0, MAX_FILENAME_LENGTH), ".json")], false));
6487
6496
  };
6488
6497
  /**
@@ -6904,6 +6913,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6904
6913
  * TODO: Maybe make custom OpenaiError
6905
6914
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
6906
6915
  * TODO: [🍜] Auto use anonymous server in browser
6916
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
6907
6917
  */
6908
6918
 
6909
6919
  /**
@@ -7257,6 +7267,255 @@ var OPENAI_MODELS = [
7257
7267
  * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
7258
7268
  */
7259
7269
 
7270
+ /**
7271
+ * Execution Tools for calling Azure OpenAI API.
7272
+ *
7273
+ * @public exported from `@promptbook/azure-openai`
7274
+ */
7275
+ var AzureOpenAiExecutionTools = /** @class */ (function () {
7276
+ /**
7277
+ * Creates OpenAI Execution Tools.
7278
+ *
7279
+ * @param options which are relevant are directly passed to the OpenAI client
7280
+ */
7281
+ function AzureOpenAiExecutionTools(options) {
7282
+ this.options = options;
7283
+ this.client = new OpenAIClient("https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
7284
+ }
7285
+ Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
7286
+ get: function () {
7287
+ return 'Azure OpenAI';
7288
+ },
7289
+ enumerable: false,
7290
+ configurable: true
7291
+ });
7292
+ Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
7293
+ get: function () {
7294
+ return 'Use all models trained by OpenAI provided by Azure';
7295
+ },
7296
+ enumerable: false,
7297
+ configurable: true
7298
+ });
7299
+ /**
7300
+ * Calls OpenAI API to use a chat model.
7301
+ */
7302
+ AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
7303
+ var _a, _b;
7304
+ return __awaiter(this, void 0, void 0, function () {
7305
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
7306
+ var _c;
7307
+ return __generator(this, function (_d) {
7308
+ switch (_d.label) {
7309
+ case 0:
7310
+ if (this.options.isVerbose) {
7311
+ console.info('💬 OpenAI callChatModel call');
7312
+ }
7313
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7314
+ // TODO: [☂] Use here more modelRequirements
7315
+ if (modelRequirements.modelVariant !== 'CHAT') {
7316
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
7317
+ }
7318
+ _d.label = 1;
7319
+ case 1:
7320
+ _d.trys.push([1, 3, , 4]);
7321
+ modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
7322
+ modelSettings = {
7323
+ maxTokens: modelRequirements.maxTokens,
7324
+ // <- TODO: [🌾] Make some global max cap for maxTokens
7325
+ temperature: modelRequirements.temperature,
7326
+ user: this.options.user,
7327
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
7328
+ // <- Note: [🧆]
7329
+ };
7330
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7331
+ messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
7332
+ ? []
7333
+ : [
7334
+ {
7335
+ role: 'system',
7336
+ content: modelRequirements.systemMessage,
7337
+ },
7338
+ ])), false), [
7339
+ {
7340
+ role: 'user',
7341
+ content: rawPromptContent,
7342
+ },
7343
+ ], false);
7344
+ start = getCurrentIsoDate();
7345
+ complete = void 0;
7346
+ if (this.options.isVerbose) {
7347
+ console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
7348
+ }
7349
+ rawRequest = [modelName, messages, modelSettings];
7350
+ return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
7351
+ case 2:
7352
+ rawResponse = _d.sent();
7353
+ if (this.options.isVerbose) {
7354
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7355
+ }
7356
+ if (!rawResponse.choices[0]) {
7357
+ throw new PipelineExecutionError('No choises from Azure OpenAI');
7358
+ }
7359
+ if (rawResponse.choices.length > 1) {
7360
+ // TODO: This should be maybe only warning
7361
+ throw new PipelineExecutionError('More than one choise from Azure OpenAI');
7362
+ }
7363
+ if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
7364
+ throw new PipelineExecutionError('Empty response from Azure OpenAI');
7365
+ }
7366
+ resultContent = rawResponse.choices[0].message.content;
7367
+ // eslint-disable-next-line prefer-const
7368
+ complete = getCurrentIsoDate();
7369
+ usage = {
7370
+ price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
7371
+ input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
7372
+ output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
7373
+ };
7374
+ return [2 /*return*/, {
7375
+ content: resultContent,
7376
+ modelName: modelName,
7377
+ timing: {
7378
+ start: start,
7379
+ complete: complete,
7380
+ },
7381
+ usage: usage,
7382
+ rawPromptContent: rawPromptContent,
7383
+ rawRequest: rawRequest,
7384
+ rawResponse: rawResponse,
7385
+ // <- [🗯]
7386
+ }];
7387
+ case 3:
7388
+ error_1 = _d.sent();
7389
+ throw this.transformAzureError(error_1);
7390
+ case 4: return [2 /*return*/];
7391
+ }
7392
+ });
7393
+ });
7394
+ };
7395
+ /**
7396
+ * Calls Azure OpenAI API to use a complete model.
7397
+ */
7398
+ AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
7399
+ var _a, _b;
7400
+ return __awaiter(this, void 0, void 0, function () {
7401
+ var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
7402
+ var _c;
7403
+ return __generator(this, function (_d) {
7404
+ switch (_d.label) {
7405
+ case 0:
7406
+ if (this.options.isVerbose) {
7407
+ console.info('🖋 OpenAI callCompletionModel call');
7408
+ }
7409
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7410
+ // TODO: [☂] Use here more modelRequirements
7411
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
7412
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
7413
+ }
7414
+ _d.label = 1;
7415
+ case 1:
7416
+ _d.trys.push([1, 3, , 4]);
7417
+ modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
7418
+ modelSettings = {
7419
+ maxTokens: modelRequirements.maxTokens || 2000,
7420
+ // <- TODO: [🌾] Make some global max cap for maxTokens
7421
+ temperature: modelRequirements.temperature,
7422
+ user: this.options.user,
7423
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
7424
+ // <- Note: [🧆]
7425
+ };
7426
+ start = getCurrentIsoDate();
7427
+ complete = void 0;
7428
+ if (this.options.isVerbose) {
7429
+ console.info(colors.bgWhite('content'), JSON.stringify(content, null, 4));
7430
+ console.info(colors.bgWhite('parameters'), JSON.stringify(parameters, null, 4));
7431
+ }
7432
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
7433
+ rawRequest = [
7434
+ modelName,
7435
+ [rawPromptContent],
7436
+ modelSettings,
7437
+ ];
7438
+ return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
7439
+ case 2:
7440
+ rawResponse = _d.sent();
7441
+ if (this.options.isVerbose) {
7442
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7443
+ }
7444
+ if (!rawResponse.choices[0]) {
7445
+ throw new PipelineExecutionError('No choises from OpenAI');
7446
+ }
7447
+ if (rawResponse.choices.length > 1) {
7448
+ // TODO: This should be maybe only warning
7449
+ throw new PipelineExecutionError('More than one choise from OpenAI');
7450
+ }
7451
+ resultContent = rawResponse.choices[0].text;
7452
+ // eslint-disable-next-line prefer-const
7453
+ complete = getCurrentIsoDate();
7454
+ usage = {
7455
+ price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
7456
+ input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
7457
+ output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
7458
+ };
7459
+ return [2 /*return*/, {
7460
+ content: resultContent,
7461
+ modelName: modelName,
7462
+ timing: {
7463
+ start: start,
7464
+ complete: complete,
7465
+ },
7466
+ usage: usage,
7467
+ rawPromptContent: rawPromptContent,
7468
+ rawRequest: rawRequest,
7469
+ rawResponse: rawResponse,
7470
+ // <- [🗯]
7471
+ }];
7472
+ case 3:
7473
+ error_2 = _d.sent();
7474
+ throw this.transformAzureError(error_2);
7475
+ case 4: return [2 /*return*/];
7476
+ }
7477
+ });
7478
+ });
7479
+ };
7480
+ // <- Note: [🤖] callXxxModel
7481
+ /**
7482
+ * Changes Azure error (which is not propper Error but object) to propper Error
7483
+ */
7484
+ AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
7485
+ if (typeof azureError !== 'object' || azureError === null) {
7486
+ return new PipelineExecutionError("Unknown Azure OpenAI error");
7487
+ }
7488
+ var code = azureError.code, message = azureError.message;
7489
+ return new PipelineExecutionError("".concat(code, ": ").concat(message));
7490
+ };
7491
+ /**
7492
+ * List all available Azure OpenAI models that can be used
7493
+ */
7494
+ AzureOpenAiExecutionTools.prototype.listModels = function () {
7495
+ return __awaiter(this, void 0, void 0, function () {
7496
+ return __generator(this, function (_a) {
7497
+ // TODO: !!! Do here some filtering which models are really available as deployment
7498
+ // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
7499
+ return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
7500
+ var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
7501
+ return ({
7502
+ modelTitle: "Azure ".concat(modelTitle),
7503
+ modelName: modelName,
7504
+ modelVariant: modelVariant,
7505
+ });
7506
+ })];
7507
+ });
7508
+ });
7509
+ };
7510
+ return AzureOpenAiExecutionTools;
7511
+ }());
7512
+ /**
7513
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
7514
+ * TODO: Maybe make custom AzureOpenaiError
7515
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
7516
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
7517
+ */
7518
+
7260
7519
  /**
7261
7520
  * Computes the usage of the OpenAI API based on the response from OpenAI
7262
7521
  *
@@ -7593,12 +7852,56 @@ var OpenAiExecutionTools = /** @class */ (function () {
7593
7852
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
7594
7853
  * TODO: Maybe make custom OpenaiError
7595
7854
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
7855
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
7856
+ */
7857
+
7858
+ /**
7859
+ * @private internal type for `createLlmToolsFromConfiguration`
7860
+ */
7861
+ var EXECUTION_TOOLS_CLASSES = {
7862
+ getOpenAiExecutionTools: function (options) {
7863
+ return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
7864
+ },
7865
+ getAnthropicClaudeExecutionTools: function (options) { return new AnthropicClaudeExecutionTools(options); },
7866
+ getAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
7867
+ // <- Note: [🦑] Add here new LLM provider
7868
+ };
7869
+ /**
7870
+ * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
7871
+ * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
7596
7872
  */
7597
7873
 
7598
7874
  /**
7599
7875
  * @@@
7600
7876
  *
7601
- * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
7877
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
7878
+ *
7879
+ * @returns @@@
7880
+ * @public exported from `@promptbook/node`
7881
+ */
7882
+ function createLlmToolsFromConfiguration(configuration, options) {
7883
+ if (options === void 0) { options = {}; }
7884
+ if (!isRunningInNode()) {
7885
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
7886
+ }
7887
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
7888
+ dotenv.config();
7889
+ var llmTools = configuration.map(function (llmConfiguration) {
7890
+ return EXECUTION_TOOLS_CLASSES["get".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
7891
+ });
7892
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
7893
+ }
7894
+ /**
7895
+ * TODO: [🧠][🎌] Dynamically install required providers
7896
+ * TODO: @@@ write discussion about this - wizzard
7897
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
7898
+ * TODO: [🧠] Is there some meaningfull way how to test this util
7899
+ * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
7900
+ * TODO: This should be maybe not under `_common` but under `utils`
7901
+ */
7902
+
7903
+ /**
7904
+ * @@@
7602
7905
  *
7603
7906
  * @@@ .env
7604
7907
  *
@@ -7609,46 +7912,77 @@ var OpenAiExecutionTools = /** @class */ (function () {
7609
7912
  * @returns @@@
7610
7913
  * @public exported from `@promptbook/node`
7611
7914
  */
7612
- function createLlmToolsFromEnv(options) {
7613
- if (options === void 0) { options = {}; }
7915
+ function createLlmToolsFromConfigurationFromEnv() {
7614
7916
  if (!isRunningInNode()) {
7615
7917
  throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
7616
7918
  }
7617
- var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
7618
- dotenv.config();
7619
- var llmTools = [];
7919
+ var llmToolsConfiguration = [];
7620
7920
  if (typeof process.env.OPENAI_API_KEY === 'string') {
7621
- llmTools.push(new OpenAiExecutionTools({
7622
- isVerbose: isVerbose,
7623
- apiKey: process.env.OPENAI_API_KEY,
7624
- }));
7921
+ llmToolsConfiguration.push({
7922
+ title: 'OpenAI (from env)',
7923
+ packageName: '@promptbook/openai',
7924
+ className: 'OpenAiExecutionTools',
7925
+ options: {
7926
+ apiKey: process.env.OPENAI_API_KEY,
7927
+ },
7928
+ });
7625
7929
  }
7626
7930
  if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
7627
- llmTools.push(new AnthropicClaudeExecutionTools({
7628
- isVerbose: isVerbose,
7629
- apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
7630
- }));
7631
- }
7632
- if (llmTools.length === 0) {
7633
- throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
7931
+ llmToolsConfiguration.push({
7932
+ title: 'Claude (from env)',
7933
+ packageName: '@promptbook/antrhopic-claude',
7934
+ className: 'AnthropicClaudeExecutionTools',
7935
+ options: {
7936
+ apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
7937
+ },
7938
+ });
7634
7939
  }
7635
- else if (llmTools.length === 1) {
7636
- return llmTools[0];
7940
+ // <- Note: [🦑] Add here new LLM provider
7941
+ return llmToolsConfiguration;
7942
+ }
7943
+ /**
7944
+ * TODO: Add Azure OpenAI
7945
+ * TODO: [🧠][🍛]
7946
+ * TODO: [🧠] Is there some meaningfull way how to test this util
7947
+ * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
7948
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
7949
+ * TODO: This should be maybe not under `_common` but under `utils`
7950
+ * TODO: [🧠] Maybe pass env as argument
7951
+ */
7952
+
7953
+ /**
7954
+ * @@@
7955
+ *
7956
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
7957
+ *
7958
+ * @@@ .env
7959
+ *
7960
+ * It looks for environment variables:
7961
+ * - `process.env.OPENAI_API_KEY`
7962
+ * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
7963
+ *
7964
+ * @returns @@@
7965
+ * @public exported from `@promptbook/node`
7966
+ */
7967
+ function createLlmToolsFromEnv(options) {
7968
+ if (options === void 0) { options = {}; }
7969
+ if (!isRunningInNode()) {
7970
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
7637
7971
  }
7638
- else {
7639
- return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
7972
+ var configuration = createLlmToolsFromConfigurationFromEnv();
7973
+ if (configuration.length === 0) {
7974
+ // TODO: [🥃]
7975
+ throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
7640
7976
  }
7977
+ return createLlmToolsFromConfiguration(configuration, options);
7641
7978
  }
7642
7979
  /**
7643
- * TODO: [🍜] Use `createLlmToolsFromConfiguration`
7644
- * TODO: @@@ write discussion about this - wizzard
7645
- * TODO: Add Azure
7646
- * TODO: [🧠] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
7980
+ * TODO: @@@ write `createLlmToolsFromEnv` vs `createLlmToolsFromConfigurationFromEnv` vs `createLlmToolsFromConfiguration`
7981
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
7647
7982
  * TODO: [🧠] Is there some meaningfull way how to test this util
7648
- * TODO: [🧠] Maybe pass env as argument
7649
7983
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
7650
- * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
7651
7984
  * TODO: [🥃] Allow `ptbk make` without llm tools
7985
+ * TODO: This should be maybe not under `_common` but under `utils`
7652
7986
  */
7653
7987
 
7654
7988
  /**
@@ -7845,6 +8179,7 @@ function getLlmToolsForCli(options) {
7845
8179
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
7846
8180
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
7847
8181
  * TODO: [🥃] Allow `ptbk make` without llm tools
8182
+ * TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
7848
8183
  */
7849
8184
 
7850
8185
  /**