@promptbook/node 0.63.3 → 0.64.0-0

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (26)
  1. package/esm/index.es.js +462 -94
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/promptbook-collection/index.d.ts +9 -171
  4. package/esm/typings/src/_packages/node.index.d.ts +6 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +5 -2
  6. package/esm/typings/src/commands/KNOWLEDGE/KnowledgeCommand.d.ts +2 -2
  7. package/esm/typings/src/llm-providers/_common/LlmConfiguration.d.ts +28 -0
  8. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +22 -0
  9. package/esm/typings/src/llm-providers/_common/config.d.ts +15 -0
  10. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfiguration.d.ts +32 -0
  11. package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +23 -0
  12. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +7 -22
  13. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
  15. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  16. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +1 -0
  17. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +3 -1
  18. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +1 -0
  19. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  20. package/esm/typings/src/prepare/preparePipeline.d.ts +1 -0
  21. package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +8 -2
  22. package/esm/typings/src/types/typeAliases.d.ts +2 -2
  23. package/esm/typings/src/utils/organization/TODO_string.d.ts +6 -0
  24. package/package.json +3 -2
  25. package/umd/index.umd.js +467 -97
  26. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -4,18 +4,19 @@ import { join as join$1, dirname } from 'path';
  import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
  import { format } from 'prettier';
  import parserHtml from 'prettier/parser-html';
+ import hexEncoder from 'crypto-js/enc-hex';
+ import sha256 from 'crypto-js/sha256';
  import { join } from 'path/posix';
- import * as dotenv from 'dotenv';
  import Anthropic from '@anthropic-ai/sdk';
+ import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
  import OpenAI from 'openai';
- import hexEncoder from 'crypto-js/enc-hex';
- import sha256 from 'crypto-js/sha256';
+ import * as dotenv from 'dotenv';
 
  // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.63.2';
+ var PROMPTBOOK_VERSION = '0.63.4';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /*! *****************************************************************************
@@ -390,7 +391,7 @@ function pipelineJsonToString(pipelineJson) {
  else if (blockType === 'PROMPT_DIALOG') {
  commands_1.push("PROMPT DIALOG");
  // Note: Nothing special here
- } // <- }else if([🩻]
+ } // <- }else if([🅱]
  if (jokers) {
  try {
  for (var jokers_1 = (e_4 = void 0, __values(jokers)), jokers_1_1 = jokers_1.next(); !jokers_1_1.done; jokers_1_1 = jokers_1.next()) {
@@ -690,7 +691,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md",title:"Prepare Knowledge from Markdown",promptbookVersion:"0.63.2",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.2",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]},{pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md",title:"Prepare Keywords",promptbookVersion:"0.63.2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.2",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]},{pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md",title:"Prepare Title",promptbookVersion:"0.63.2",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the 
document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.2",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]},{pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",sourceFile:"./promptbook-collection/prepare-persona.ptbk.md",title:"Prepare Keywords",promptbookVersion:"0.63.2",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.63.2",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}]}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1257,7 +1258,7 @@ var SimplePipelineCollection = /** @class */ (function () {
  pipelineJsonToString(unpreparePipeline(pipeline)) !==
  pipelineJsonToString(unpreparePipeline(this.collection.get(pipeline.pipelineUrl)))) {
  var existing = this.collection.get(pipeline.pipelineUrl);
- throw new ReferenceError$1(spaceTrim$1("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
+ throw new ReferenceError$1(spaceTrim$1("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4E\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
  }
  // Note: [🧠] Overwrite existing pipeline with the same URL
  this.collection.set(pipeline.pipelineUrl, pipeline);
@@ -1934,6 +1935,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  // <- Note: [🤖]
  /**
  * Calls the best available model
+ *
+ * Note: This should be private or protected but is public to be usable with duck typing
  */
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
@@ -3442,7 +3445,7 @@ function prepareKnowledgePieces(knowledgeSources, options) {
  var partialPieces, pieces;
  return __generator(this, function (_a) {
  switch (_a.label) {
- case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.source, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
+ case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
  options)];
  case 1:
  partialPieces = _a.sent();
@@ -3632,6 +3635,35 @@ function preparePersona(personaDescription, options) {
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
  */
 
+ /**
+ * @@@
+ *
+ * Note: It is usefull @@@
+ *
+ * @param pipeline
+ * @public exported from `@promptbook/utils`
+ */
+ function clonePipeline(pipeline) {
+ // Note: Not using spread operator (...) because @@@
+ var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, promptbookVersion = pipeline.promptbookVersion, description = pipeline.description, parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
+ return {
+ pipelineUrl: pipelineUrl,
+ sourceFile: sourceFile,
+ title: title,
+ promptbookVersion: promptbookVersion,
+ description: description,
+ parameters: parameters,
+ promptTemplates: promptTemplates,
+ knowledgeSources: knowledgeSources,
+ knowledgePieces: knowledgePieces,
+ personas: personas,
+ preparations: preparations,
+ };
+ }
+ /**
+ * TODO: [🍙] Make some standart order of json properties
+ */
+
  /**
  * @@@
  *
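Note: The relocated `clonePipeline` helper makes an explicit shallow copy of every known pipeline property instead of using the spread operator. A minimal usage sketch (assuming a `pipeline` object in the `PipelineJson` shape used throughout this file):

```js
import { clonePipeline } from '@promptbook/utils';

// Shallow copy: a new top-level object, but nested arrays keep their identity
const copy = clonePipeline(pipeline);

console.log(copy !== pipeline); // -> true
console.log(copy.promptTemplates === pipeline.promptTemplates); // -> true (shallow)
```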
@@ -3684,40 +3716,12 @@ function prepareTemplates(pipeline, options) {
  * TODO: [🧠][🥜]
  */
 
- /**
- * @@@
- *
- * Note: It is usefull @@@
- *
- * @param pipeline
- * @public exported from `@promptbook/utils`
- */
- function clonePipeline(pipeline) {
- // Note: Not using spread operator (...) because @@@
- var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, promptbookVersion = pipeline.promptbookVersion, description = pipeline.description, parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
- return {
- pipelineUrl: pipelineUrl,
- sourceFile: sourceFile,
- title: title,
- promptbookVersion: promptbookVersion,
- description: description,
- parameters: parameters,
- promptTemplates: promptTemplates,
- knowledgeSources: knowledgeSources,
- knowledgePieces: knowledgePieces,
- personas: personas,
- preparations: preparations,
- };
- }
- /**
- * TODO: [🍙] Make some standart order of json properties
- */
-
  /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
  * Note: This function does not validate logic of the pipeline
  * Note: This function acts as part of compilation process
+ * Note: When the pipeline is already prepared, it returns the same pipeline
  * @public exported from `@promptbook/core`
  */
  function preparePipeline(pipeline, options) {
@@ -3732,6 +3736,9 @@ function preparePipeline(pipeline, options) {
  return __generator(this, function (_c) {
  switch (_c.label) {
  case 0:
+ if (isPipelinePrepared(pipeline)) {
+ return [2 /*return*/, pipeline];
+ }
  llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
  parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
  llmToolsWithUsage = countTotalUsage(llmTools);
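Note: The new guard makes `preparePipeline` idempotent; an already-prepared pipeline is returned as-is instead of being re-prepared (and re-billed). A sketch of the resulting call pattern, assuming both functions are exported from `@promptbook/core` as their annotations suggest and that `llmTools` is already constructed:

```js
import { pipelineStringToJsonSync, preparePipeline } from '@promptbook/core';

const pipeline = pipelineStringToJsonSync(pipelineString); // <- pipelineString is a placeholder
const prepared = await preparePipeline(pipeline, { llmTools });

// A second call now short-circuits via isPipelinePrepared and returns the same object
const preparedAgain = await preparePipeline(prepared, { llmTools });
console.log(preparedAgain === prepared); // -> true
```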
@@ -3865,34 +3872,37 @@ var knowledgeCommandParser = {
  */
  parse: function (input) {
  var args = input.args;
- var source = args[0];
- if (source === undefined) {
+ var sourceContent = spaceTrim(args[0] || '');
+ if (sourceContent === '') {
  throw new ParsingError("Source is not defined");
  }
- if (source.startsWith('http://')) {
+ // TODO: !!!! Following checks should be applied every link in the `sourceContent`
+ if (sourceContent.startsWith('http://')) {
  throw new ParsingError("Source is not secure");
  }
- if (!(isValidFilePath(source) || isValidUrl(source))) {
+ if (!(isValidFilePath(sourceContent) || isValidUrl(sourceContent))) {
  throw new ParsingError("Source not valid");
  }
- if (source.startsWith('../') || source.startsWith('/') || /^[A-Z]:[\\/]+/i.test(source)) {
+ if (sourceContent.startsWith('../') || sourceContent.startsWith('/') || /^[A-Z]:[\\/]+/i.test(sourceContent)) {
  throw new ParsingError("Source cannot be outside of the .ptbk.md folder");
  }
  return {
  type: 'KNOWLEDGE',
- source: source,
+ sourceContent: sourceContent,
  };
  },
  /**
  * Note: Prototype of [🍧] (remove this comment after full implementation)
  */
  applyToPipelineJson: function (personaCommand, subjects) {
- var source = personaCommand.source;
+ var sourceContent = personaCommand.sourceContent;
  var pipelineJson = subjects.pipelineJson;
- var name = titleToName(source);
+ var name = 'source-' + sha256(hexEncoder.parse(JSON.stringify(sourceContent))).toString( /* hex */);
+ // <- TODO: [🥬] Encapsulate sha256 to some private utility function
+ // <- TODO: This should be replaced with a better name later in preparation (done with some propper LLM summarization)
  pipelineJson.knowledgeSources.push({
  name: name,
- source: source,
+ sourceContent: sourceContent,
  });
  },
  };
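Note: Knowledge sources are now named by a content hash instead of `titleToName(source)`. The derivation below is copied from `applyToPipelineJson` above; note the quirk that `hexEncoder.parse` receives a JSON string, not hex (see the [🥬] TODO):

```js
import hexEncoder from 'crypto-js/enc-hex';
import sha256 from 'crypto-js/sha256';

function knowledgeSourceName(sourceContent) {
    // Same expression as in applyToPipelineJson; stable for identical sourceContent
    return 'source-' + sha256(hexEncoder.parse(JSON.stringify(sourceContent))).toString(/* hex */);
}

knowledgeSourceName('https://example.com/article.md');
// -> 'source-' followed by a 64-character hex digest
```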
@@ -4085,7 +4095,7 @@ var BlockTypes = [
  'KNOWLEDGE',
  'INSTRUMENT',
  'ACTION',
- // <- [🩻]
+ // <- [🅱]
  ];
 
  /**
@@ -4112,7 +4122,7 @@ var blockCommandParser = {
  'KNOWLEDGE',
  'INSTRUMENT',
  'ACTION',
- // <- [🩻]
+ // <- [🅱]
  ],
  /**
  * Aliases for the BLOCK command
@@ -5578,7 +5588,7 @@ function pipelineStringToJsonSync(pipelineString) {
  if (command.blockType === 'KNOWLEDGE') {
  knowledgeCommandParser.applyToPipelineJson({
  type: 'KNOWLEDGE',
- source: content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
+ sourceContent: content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
  }, {
  pipelineJson: pipelineJson,
  templateJson: templateJson,
@@ -6189,7 +6199,7 @@ function createCollectionFromDirectory(path, options) {
  }
  else {
  existing = collection.get(pipeline.pipelineUrl);
- throw new ReferenceError(spaceTrim("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
+ throw new ReferenceError(spaceTrim("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4F\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
  }
  }
  }
@@ -6255,22 +6265,6 @@ function createCollectionFromDirectory(path, options) {
  * TODO: [🖇] What about symlinks? Maybe option isSymlinksFollowed
  */
 
- /**
- * This error type indicates that you try to use a feature that is not available in the current environment
- *
- * @public exported from `@promptbook/core`
- */
- var EnvironmentMismatchError = /** @class */ (function (_super) {
- __extends(EnvironmentMismatchError, _super);
- function EnvironmentMismatchError(message) {
- var _this = _super.call(this, message) || this;
- _this.name = 'EnvironmentMismatchError';
- Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
- return _this;
- }
- return EnvironmentMismatchError;
- }(Error));
-
  /**
  * Helper of usage compute
  *
@@ -6616,6 +6610,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🍜] Auto use anonymous server in browser
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */
 
  /**
@@ -6969,6 +6964,255 @@ var OPENAI_MODELS = [
  * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
  */
 
+ /**
+ * Execution Tools for calling Azure OpenAI API.
+ *
+ * @public exported from `@promptbook/azure-openai`
+ */
+ var AzureOpenAiExecutionTools = /** @class */ (function () {
+ /**
+ * Creates OpenAI Execution Tools.
+ *
+ * @param options which are relevant are directly passed to the OpenAI client
+ */
+ function AzureOpenAiExecutionTools(options) {
+ this.options = options;
+ this.client = new OpenAIClient("https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
+ }
+ Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
+ get: function () {
+ return 'Azure OpenAI';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
+ get: function () {
+ return 'Use all models trained by OpenAI provided by Azure';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ /**
+ * Calls OpenAI API to use a chat model.
+ */
+ AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
+ var _a, _b;
+ return __awaiter(this, void 0, void 0, function () {
+ var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
+ var _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ if (this.options.isVerbose) {
+ console.info('💬 OpenAI callChatModel call');
+ }
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ // TODO: [☂] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'CHAT') {
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
+ }
+ _d.label = 1;
+ case 1:
+ _d.trys.push([1, 3, , 4]);
+ modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
+ modelSettings = {
+ maxTokens: modelRequirements.maxTokens,
+ // <- TODO: [🌾] Make some global max cap for maxTokens
+ temperature: modelRequirements.temperature,
+ user: this.options.user,
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
+ // <- Note: [🧆]
+ };
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
+ messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
+ ? []
+ : [
+ {
+ role: 'system',
+ content: modelRequirements.systemMessage,
+ },
+ ])), false), [
+ {
+ role: 'user',
+ content: rawPromptContent,
+ },
+ ], false);
+ start = getCurrentIsoDate();
+ complete = void 0;
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
+ }
+ rawRequest = [modelName, messages, modelSettings];
+ return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
+ case 2:
+ rawResponse = _d.sent();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError('No choises from Azure OpenAI');
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError('More than one choise from Azure OpenAI');
+ }
+ if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
+ throw new PipelineExecutionError('Empty response from Azure OpenAI');
+ }
+ resultContent = rawResponse.choices[0].message.content;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ usage = {
+ price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
+ input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
+ output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
+ };
+ return [2 /*return*/, {
+ content: resultContent,
+ modelName: modelName,
+ timing: {
+ start: start,
+ complete: complete,
+ },
+ usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
+ rawResponse: rawResponse,
+ // <- [🗯]
+ }];
+ case 3:
+ error_1 = _d.sent();
+ throw this.transformAzureError(error_1);
+ case 4: return [2 /*return*/];
+ }
+ });
+ });
+ };
+ /**
+ * Calls Azure OpenAI API to use a complete model.
+ */
+ AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
+ var _a, _b;
+ return __awaiter(this, void 0, void 0, function () {
+ var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
+ var _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ if (this.options.isVerbose) {
+ console.info('🖋 OpenAI callCompletionModel call');
+ }
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ // TODO: [☂] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
+ }
+ _d.label = 1;
+ case 1:
+ _d.trys.push([1, 3, , 4]);
+ modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
+ modelSettings = {
+ maxTokens: modelRequirements.maxTokens || 2000,
+ // <- TODO: [🌾] Make some global max cap for maxTokens
+ temperature: modelRequirements.temperature,
+ user: this.options.user,
+ // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
+ // <- Note: [🧆]
+ };
+ start = getCurrentIsoDate();
+ complete = void 0;
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('content'), JSON.stringify(content, null, 4));
+ console.info(colors.bgWhite('parameters'), JSON.stringify(parameters, null, 4));
+ }
+ rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
+ rawRequest = [
+ modelName,
+ [rawPromptContent],
+ modelSettings,
+ ];
+ return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
+ case 2:
+ rawResponse = _d.sent();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError('No choises from OpenAI');
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError('More than one choise from OpenAI');
+ }
+ resultContent = rawResponse.choices[0].text;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ usage = {
+ price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
+ input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
+ output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
+ };
+ return [2 /*return*/, {
+ content: resultContent,
+ modelName: modelName,
+ timing: {
+ start: start,
+ complete: complete,
+ },
+ usage: usage,
+ rawPromptContent: rawPromptContent,
+ rawRequest: rawRequest,
+ rawResponse: rawResponse,
+ // <- [🗯]
+ }];
+ case 3:
+ error_2 = _d.sent();
+ throw this.transformAzureError(error_2);
+ case 4: return [2 /*return*/];
+ }
+ });
+ });
+ };
+ // <- Note: [🤖] callXxxModel
+ /**
+ * Changes Azure error (which is not propper Error but object) to propper Error
+ */
+ AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
+ if (typeof azureError !== 'object' || azureError === null) {
+ return new PipelineExecutionError("Unknown Azure OpenAI error");
+ }
+ var code = azureError.code, message = azureError.message;
+ return new PipelineExecutionError("".concat(code, ": ").concat(message));
+ };
+ /**
+ * List all available Azure OpenAI models that can be used
+ */
+ AzureOpenAiExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ // TODO: !!! Do here some filtering which models are really available as deployment
+ // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
+ return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
+ var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
+ return ({
+ modelTitle: "Azure ".concat(modelTitle),
+ modelName: modelName,
+ modelVariant: modelVariant,
+ });
+ })];
+ });
+ });
+ };
+ return AzureOpenAiExecutionTools;
+ }());
+ /**
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
+ * TODO: Maybe make custom AzureOpenaiError
+ * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
+ */
+
  /**
  * Computes the usage of the OpenAI API based on the response from OpenAI
  *
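Note: The new provider wraps `OpenAIClient` from `@azure/openai`. A minimal construction sketch, assuming the class is re-exported from `@promptbook/azure-openai` as its `@public` annotation states; the resource and deployment names are hypothetical placeholders:

```js
import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

const llmTools = new AzureOpenAiExecutionTools({
    isVerbose: true,
    resourceName: 'my-resource', // <- hypothetical, becomes https://my-resource.openai.azure.com/
    deploymentName: 'gpt-4-turbo', // <- hypothetical, used when the prompt has no modelName
    apiKey: process.env.AZURE_OPENAI_API_KEY,
});

// Currently mirrors OPENAI_MODELS with an "Azure " title prefix (see the !!! TODO above)
const models = await llmTools.listModels();
```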
@@ -7305,12 +7549,104 @@ var OpenAiExecutionTools = /** @class */ (function () {
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */
 
+ /**
+ * @public exported from `@promptbook/node`
+ */
+ var LLM_CONFIGURATION_BOILERPLATES = [
+ {
+ title: 'Open AI',
+ packageName: '@promptbook/openai',
+ className: 'OpenAiExecutionTools',
+ options: {
+ apiKey: 'sk-',
+ },
+ },
+ {
+ title: 'Anthropic Claude',
+ packageName: '@promptbook/anthropic-claude',
+ className: 'AnthropicClaudeExecutionTools',
+ options: {
+ apiKey: 'sk-ant-api03-',
+ },
+ },
+ {
+ title: 'Azure Open AI',
+ packageName: '@promptbook/azure-openai',
+ className: 'AzureOpenAiExecutionTools',
+ options: {
+ // TODO: !!!> resourceName
+ // TODO: !!!> deploymentName
+ apiKey: 'sk-',
+ },
+ },
+ // <- Note: [🦑] Add here new LLM provider
+ ];
+ /**
+ * @private internal type for `createLlmToolsFromConfiguration`
+ */
+ var EXECUTION_TOOLS_CLASSES = {
+ getOpenAiExecutionTools: function (options) {
+ return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
+ },
+ getAnthropicClaudeExecutionTools: function (options) { return new AnthropicClaudeExecutionTools(options); },
+ getAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
+ // <- Note: [🦑] Add here new LLM provider
+ };
+ /**
+ * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
+ * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
+ */
+
+ /**
+ * This error type indicates that you try to use a feature that is not available in the current environment
+ *
+ * @public exported from `@promptbook/core`
+ */
+ var EnvironmentMismatchError = /** @class */ (function (_super) {
+ __extends(EnvironmentMismatchError, _super);
+ function EnvironmentMismatchError(message) {
+ var _this = _super.call(this, message) || this;
+ _this.name = 'EnvironmentMismatchError';
+ Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
+ return _this;
+ }
+ return EnvironmentMismatchError;
+ }(Error));
+
  /**
  * @@@
  *
- * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
+ *
+ * @returns @@@
+ * @public exported from `@promptbook/node`
+ */
+ function createLlmToolsFromConfiguration(configuration, options) {
+ if (options === void 0) { options = {}; }
+ if (!isRunningInNode()) {
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
+ }
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+ dotenv.config();
+ var llmTools = configuration.map(function (llmConfiguration) {
+ return EXECUTION_TOOLS_CLASSES["get".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
+ });
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
+ }
+ /**
+ * TODO: [🧠][🎌] Dynamically install required providers
+ * TODO: @@@ write discussion about this - wizzard
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
+ * TODO: This should be maybe not under `_common` but under `utils`
+ */
+
+ /**
+ * @@@
  *
  * @@@ .env
@@ -7321,46 +7657,77 @@ var OpenAiExecutionTools = /** @class */ (function () {
  * @returns @@@
  * @public exported from `@promptbook/node`
  */
- function createLlmToolsFromEnv(options) {
- if (options === void 0) { options = {}; }
+ function createLlmToolsFromConfigurationFromEnv() {
  if (!isRunningInNode()) {
  throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
  }
- var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
- dotenv.config();
- var llmTools = [];
+ var llmToolsConfiguration = [];
  if (typeof process.env.OPENAI_API_KEY === 'string') {
- llmTools.push(new OpenAiExecutionTools({
- isVerbose: isVerbose,
- apiKey: process.env.OPENAI_API_KEY,
- }));
+ llmToolsConfiguration.push({
+ title: 'OpenAI (from env)',
+ packageName: '@promptbook/openai',
+ className: 'OpenAiExecutionTools',
+ options: {
+ apiKey: process.env.OPENAI_API_KEY,
+ },
+ });
  }
  if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
- llmTools.push(new AnthropicClaudeExecutionTools({
- isVerbose: isVerbose,
- apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
- }));
- }
- if (llmTools.length === 0) {
- throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
+ llmToolsConfiguration.push({
+ title: 'Claude (from env)',
+ packageName: '@promptbook/antrhopic-claude',
+ className: 'AnthropicClaudeExecutionTools',
+ options: {
+ apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
+ },
+ });
  }
- else if (llmTools.length === 1) {
- return llmTools[0];
+ // <- Note: [🦑] Add here new LLM provider
+ return llmToolsConfiguration;
+ }
+ /**
+ * TODO: Add Azure OpenAI
+ * TODO: [🧠][🍛]
+ * TODO: [🧠] Is there some meaningfull way how to test this util
+ * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
+ * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ * TODO: This should be maybe not under `_common` but under `utils`
+ * TODO: [🧠] Maybe pass env as argument
+ */
+
+ /**
+ * @@@
+ *
+ * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
+ *
+ * @@@ .env
+ *
+ * It looks for environment variables:
+ * - `process.env.OPENAI_API_KEY`
+ * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
+ *
+ * @returns @@@
+ * @public exported from `@promptbook/node`
+ */
+ function createLlmToolsFromEnv(options) {
+ if (options === void 0) { options = {}; }
+ if (!isRunningInNode()) {
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
  }
- else {
- return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
+ var configuration = createLlmToolsFromConfigurationFromEnv();
+ if (configuration.length === 0) {
+ // TODO: [🥃]
+ throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
  }
+ return createLlmToolsFromConfiguration(configuration, options);
  }
  /**
- * TODO: [🍜] Use `createLlmToolsFromConfiguration`
- * TODO: @@@ write discussion about this - wizzard
- * TODO: Add Azure
- * TODO: [🧠] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
+ * TODO: @@@ write `createLlmToolsFromEnv` vs `createLlmToolsFromConfigurationFromEnv` vs `createLlmToolsFromConfiguration`
+ * TODO: [🧠][🍛] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
  * TODO: [🧠] Is there some meaningfull way how to test this util
- * TODO: [🧠] Maybe pass env as argument
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
- * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
  * TODO: [🥃] Allow `ptbk make` without llm tools
+ * TODO: This should be maybe not under `_common` but under `utils`
  */
 
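Note: `createLlmToolsFromEnv` is now a thin wrapper that reads the configuration from the environment and delegates to `createLlmToolsFromConfiguration`. A minimal sketch, assuming `OPENAI_API_KEY` or `ANTHROPIC_CLAUDE_API_KEY` is set in `.env` or the process environment:

```js
import { createLlmToolsFromEnv } from '@promptbook/node';

// Throws when neither OPENAI_API_KEY nor ANTHROPIC_CLAUDE_API_KEY is set
const llmTools = createLlmToolsFromEnv({ isVerbose: true });

const models = await llmTools.listModels();
```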
@@ -7414,6 +7781,7 @@ var FilesStorage = /** @class */ (function () {
  FilesStorage.prototype.getFilenameForKey = function (key) {
  var name = titleToName(key);
  var hash = sha256(hexEncoder.parse(name)).toString( /* hex */);
+ // <- TODO: [🥬] Encapsulate sha256 to some private utility function
  return join$1.apply(void 0, __spreadArray(__spreadArray([this.options.cacheFolderPath], __read(nameToSubfolderPath(hash /* <- TODO: [🎎] Maybe add some SHA256 prefix */)), false), ["".concat(name.substring(0, MAX_FILENAME_LENGTH), ".json")], false));
  };
  /**
@@ -7490,5 +7858,5 @@ var FilesStorage = /** @class */ (function () {
  * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli` and `@promptbook/cli`
  */
 
- export { FilesStorage, PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromEnv };
+ export { FilesStorage, LLM_CONFIGURATION_BOILERPLATES, PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromConfiguration, createLlmToolsFromConfigurationFromEnv, createLlmToolsFromEnv };
  //# sourceMappingURL=index.es.js.map