@promptbook/cli 0.63.3 → 0.64.0-0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +412 -75
- package/esm/index.es.js.map +1 -1
- package/esm/typings/promptbook-collection/index.d.ts +9 -171
- package/esm/typings/src/_packages/node.index.d.ts +6 -0
- package/esm/typings/src/_packages/types.index.d.ts +5 -2
- package/esm/typings/src/commands/KNOWLEDGE/KnowledgeCommand.d.ts +2 -2
- package/esm/typings/src/llm-providers/_common/LlmConfiguration.d.ts +28 -0
- package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +22 -0
- package/esm/typings/src/llm-providers/_common/config.d.ts +15 -0
- package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfiguration.d.ts +32 -0
- package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +23 -0
- package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +7 -22
- package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +1 -0
- package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +3 -2
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +1 -0
- package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +3 -1
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +1 -0
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
- package/esm/typings/src/prepare/preparePipeline.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +8 -2
- package/esm/typings/src/types/typeAliases.d.ts +2 -2
- package/esm/typings/src/utils/organization/TODO_string.d.ts +6 -0
- package/package.json +2 -1
- package/umd/index.umd.js +414 -78
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
(function (global, factory) {
|
|
2
|
-
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('commander'), require('spacetrim'), require('colors'), require('waitasecond'), require('fs/promises'), require('path'), require('prettier'), require('prettier/parser-html'), require('
|
|
3
|
-
typeof define === 'function' && define.amd ? define(['exports', 'commander', 'spacetrim', 'colors', 'waitasecond', 'fs/promises', 'path', 'prettier', 'prettier/parser-html', '
|
|
4
|
-
(global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.commander, global.spaceTrim, global.colors, global.waitasecond, global.promises, global.path, global.prettier, global.parserHtml, global.
|
|
5
|
-
})(this, (function (exports, commander, spaceTrim, colors, waitasecond, promises, path, prettier, parserHtml,
|
|
2
|
+
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('commander'), require('spacetrim'), require('colors'), require('waitasecond'), require('fs/promises'), require('path'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('path/posix'), require('dotenv'), require('@anthropic-ai/sdk'), require('@azure/openai'), require('openai'), require('glob-promise')) :
|
|
3
|
+
typeof define === 'function' && define.amd ? define(['exports', 'commander', 'spacetrim', 'colors', 'waitasecond', 'fs/promises', 'path', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'path/posix', 'dotenv', '@anthropic-ai/sdk', '@azure/openai', 'openai', 'glob-promise'], factory) :
|
|
4
|
+
(global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.commander, global.spaceTrim, global.colors, global.waitasecond, global.promises, global.path, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.posix, global.dotenv, global.Anthropic, global.openai, global.OpenAI, global.glob));
|
|
5
|
+
})(this, (function (exports, commander, spaceTrim, colors, waitasecond, promises, path, prettier, parserHtml, hexEncoder, sha256, posix, dotenv, Anthropic, openai, OpenAI, glob) { 'use strict';
|
|
6
6
|
|
|
7
7
|
function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
|
|
8
8
|
|
|
@@ -39,7 +39,7 @@
|
|
|
39
39
|
/**
|
|
40
40
|
* The version of the Promptbook library
|
|
41
41
|
*/
|
|
42
|
-
var PROMPTBOOK_VERSION = '0.63.
|
|
42
|
+
var PROMPTBOOK_VERSION = '0.63.4';
|
|
43
43
|
// TODO: !!!! List here all the versions and annotate + put into script
|
|
44
44
|
|
|
45
45
|
/*! *****************************************************************************
|
|
@@ -560,7 +560,7 @@
|
|
|
560
560
|
else if (blockType === 'PROMPT_DIALOG') {
|
|
561
561
|
commands_1.push("PROMPT DIALOG");
|
|
562
562
|
// Note: Nothing special here
|
|
563
|
-
} // <- }else if([
|
|
563
|
+
} // <- }else if([🅱]
|
|
564
564
|
if (jokers) {
|
|
565
565
|
try {
|
|
566
566
|
for (var jokers_1 = (e_4 = void 0, __values(jokers)), jokers_1_1 = jokers_1.next(); !jokers_1_1.done; jokers_1_1 = jokers_1.next()) {
|
|
@@ -860,7 +860,7 @@
|
|
|
860
860
|
});
|
|
861
861
|
}
|
|
862
862
|
|
|
863
|
-
var PipelineCollection = [{pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",
|
|
863
|
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.63.4",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": 
\"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
|
|
864
864
|
|
|
865
865
|
/**
|
|
866
866
|
* This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
|
|
@@ -1427,7 +1427,7 @@
|
|
|
1427
1427
|
pipelineJsonToString(unpreparePipeline(pipeline)) !==
|
|
1428
1428
|
pipelineJsonToString(unpreparePipeline(this.collection.get(pipeline.pipelineUrl)))) {
|
|
1429
1429
|
var existing = this.collection.get(pipeline.pipelineUrl);
|
|
1430
|
-
throw new ReferenceError$1(spaceTrim.spaceTrim("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
|
|
1430
|
+
throw new ReferenceError$1(spaceTrim.spaceTrim("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4E\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
|
|
1431
1431
|
}
|
|
1432
1432
|
// Note: [🧠] Overwrite existing pipeline with the same URL
|
|
1433
1433
|
this.collection.set(pipeline.pipelineUrl, pipeline);
|
|
@@ -2104,6 +2104,8 @@
|
|
|
2104
2104
|
// <- Note: [🤖]
|
|
2105
2105
|
/**
|
|
2106
2106
|
* Calls the best available model
|
|
2107
|
+
*
|
|
2108
|
+
* Note: This should be private or protected but is public to be usable with duck typing
|
|
2107
2109
|
*/
|
|
2108
2110
|
MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
|
|
2109
2111
|
return __awaiter(this, void 0, void 0, function () {
|
|
@@ -3612,7 +3614,7 @@
|
|
|
3612
3614
|
var partialPieces, pieces;
|
|
3613
3615
|
return __generator(this, function (_a) {
|
|
3614
3616
|
switch (_a.label) {
|
|
3615
|
-
case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.
|
|
3617
|
+
case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.sourceContent, // <- TODO: [🐝] !!! Unhardcode markdown, detect which type it is - BE AWARE of big package size
|
|
3616
3618
|
options)];
|
|
3617
3619
|
case 1:
|
|
3618
3620
|
partialPieces = _a.sent();
|
|
@@ -3802,6 +3804,35 @@
|
|
|
3802
3804
|
* TODO: [🏢] !! Check validity of `temperature` in pipeline
|
|
3803
3805
|
*/
|
|
3804
3806
|
|
|
3807
|
+
/**
|
|
3808
|
+
* @@@
|
|
3809
|
+
*
|
|
3810
|
+
* Note: It is usefull @@@
|
|
3811
|
+
*
|
|
3812
|
+
* @param pipeline
|
|
3813
|
+
* @public exported from `@promptbook/utils`
|
|
3814
|
+
*/
|
|
3815
|
+
function clonePipeline(pipeline) {
|
|
3816
|
+
// Note: Not using spread operator (...) because @@@
|
|
3817
|
+
var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, promptbookVersion = pipeline.promptbookVersion, description = pipeline.description, parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
|
|
3818
|
+
return {
|
|
3819
|
+
pipelineUrl: pipelineUrl,
|
|
3820
|
+
sourceFile: sourceFile,
|
|
3821
|
+
title: title,
|
|
3822
|
+
promptbookVersion: promptbookVersion,
|
|
3823
|
+
description: description,
|
|
3824
|
+
parameters: parameters,
|
|
3825
|
+
promptTemplates: promptTemplates,
|
|
3826
|
+
knowledgeSources: knowledgeSources,
|
|
3827
|
+
knowledgePieces: knowledgePieces,
|
|
3828
|
+
personas: personas,
|
|
3829
|
+
preparations: preparations,
|
|
3830
|
+
};
|
|
3831
|
+
}
|
|
3832
|
+
/**
|
|
3833
|
+
* TODO: [🍙] Make some standart order of json properties
|
|
3834
|
+
*/
|
|
3835
|
+
|
|
3805
3836
|
/**
|
|
3806
3837
|
* @@@
|
|
3807
3838
|
*
|
|
@@ -3854,40 +3885,12 @@
|
|
|
3854
3885
|
* TODO: [🧠][🥜]
|
|
3855
3886
|
*/
|
|
3856
3887
|
|
|
3857
|
-
/**
|
|
3858
|
-
* @@@
|
|
3859
|
-
*
|
|
3860
|
-
* Note: It is usefull @@@
|
|
3861
|
-
*
|
|
3862
|
-
* @param pipeline
|
|
3863
|
-
* @public exported from `@promptbook/utils`
|
|
3864
|
-
*/
|
|
3865
|
-
function clonePipeline(pipeline) {
|
|
3866
|
-
// Note: Not using spread operator (...) because @@@
|
|
3867
|
-
var pipelineUrl = pipeline.pipelineUrl, sourceFile = pipeline.sourceFile, title = pipeline.title, promptbookVersion = pipeline.promptbookVersion, description = pipeline.description, parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, knowledgePieces = pipeline.knowledgePieces, personas = pipeline.personas, preparations = pipeline.preparations;
|
|
3868
|
-
return {
|
|
3869
|
-
pipelineUrl: pipelineUrl,
|
|
3870
|
-
sourceFile: sourceFile,
|
|
3871
|
-
title: title,
|
|
3872
|
-
promptbookVersion: promptbookVersion,
|
|
3873
|
-
description: description,
|
|
3874
|
-
parameters: parameters,
|
|
3875
|
-
promptTemplates: promptTemplates,
|
|
3876
|
-
knowledgeSources: knowledgeSources,
|
|
3877
|
-
knowledgePieces: knowledgePieces,
|
|
3878
|
-
personas: personas,
|
|
3879
|
-
preparations: preparations,
|
|
3880
|
-
};
|
|
3881
|
-
}
|
|
3882
|
-
/**
|
|
3883
|
-
* TODO: [🍙] Make some standart order of json properties
|
|
3884
|
-
*/
|
|
3885
|
-
|
|
3886
3888
|
/**
|
|
3887
3889
|
* Prepare pipeline from string (markdown) format to JSON format
|
|
3888
3890
|
*
|
|
3889
3891
|
* Note: This function does not validate logic of the pipeline
|
|
3890
3892
|
* Note: This function acts as part of compilation process
|
|
3893
|
+
* Note: When the pipeline is already prepared, it returns the same pipeline
|
|
3891
3894
|
* @public exported from `@promptbook/core`
|
|
3892
3895
|
*/
|
|
3893
3896
|
function preparePipeline(pipeline, options) {
|
|
@@ -3902,6 +3905,9 @@
|
|
|
3902
3905
|
return __generator(this, function (_c) {
|
|
3903
3906
|
switch (_c.label) {
|
|
3904
3907
|
case 0:
|
|
3908
|
+
if (isPipelinePrepared(pipeline)) {
|
|
3909
|
+
return [2 /*return*/, pipeline];
|
|
3910
|
+
}
|
|
3905
3911
|
llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
|
|
3906
3912
|
parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
|
|
3907
3913
|
llmToolsWithUsage = countTotalUsage(llmTools);
|
|
@@ -4035,34 +4041,37 @@
|
|
|
4035
4041
|
*/
|
|
4036
4042
|
parse: function (input) {
|
|
4037
4043
|
var args = input.args;
|
|
4038
|
-
var
|
|
4039
|
-
if (
|
|
4044
|
+
var sourceContent = spaceTrim__default["default"](args[0] || '');
|
|
4045
|
+
if (sourceContent === '') {
|
|
4040
4046
|
throw new ParsingError("Source is not defined");
|
|
4041
4047
|
}
|
|
4042
|
-
|
|
4048
|
+
// TODO: !!!! Following checks should be applied every link in the `sourceContent`
|
|
4049
|
+
if (sourceContent.startsWith('http://')) {
|
|
4043
4050
|
throw new ParsingError("Source is not secure");
|
|
4044
4051
|
}
|
|
4045
|
-
if (!(isValidFilePath(
|
|
4052
|
+
if (!(isValidFilePath(sourceContent) || isValidUrl(sourceContent))) {
|
|
4046
4053
|
throw new ParsingError("Source not valid");
|
|
4047
4054
|
}
|
|
4048
|
-
if (
|
|
4055
|
+
if (sourceContent.startsWith('../') || sourceContent.startsWith('/') || /^[A-Z]:[\\/]+/i.test(sourceContent)) {
|
|
4049
4056
|
throw new ParsingError("Source cannot be outside of the .ptbk.md folder");
|
|
4050
4057
|
}
|
|
4051
4058
|
return {
|
|
4052
4059
|
type: 'KNOWLEDGE',
|
|
4053
|
-
|
|
4060
|
+
sourceContent: sourceContent,
|
|
4054
4061
|
};
|
|
4055
4062
|
},
|
|
4056
4063
|
/**
|
|
4057
4064
|
* Note: Prototype of [🍧] (remove this comment after full implementation)
|
|
4058
4065
|
*/
|
|
4059
4066
|
applyToPipelineJson: function (personaCommand, subjects) {
|
|
4060
|
-
var
|
|
4067
|
+
var sourceContent = personaCommand.sourceContent;
|
|
4061
4068
|
var pipelineJson = subjects.pipelineJson;
|
|
4062
|
-
var name =
|
|
4069
|
+
var name = 'source-' + sha256__default["default"](hexEncoder__default["default"].parse(JSON.stringify(sourceContent))).toString( /* hex */);
|
|
4070
|
+
// <- TODO: [🥬] Encapsulate sha256 to some private utility function
|
|
4071
|
+
// <- TODO: This should be replaced with a better name later in preparation (done with some propper LLM summarization)
|
|
4063
4072
|
pipelineJson.knowledgeSources.push({
|
|
4064
4073
|
name: name,
|
|
4065
|
-
|
|
4074
|
+
sourceContent: sourceContent,
|
|
4066
4075
|
});
|
|
4067
4076
|
},
|
|
4068
4077
|
};
|
|
@@ -4255,7 +4264,7 @@
|
|
|
4255
4264
|
'KNOWLEDGE',
|
|
4256
4265
|
'INSTRUMENT',
|
|
4257
4266
|
'ACTION',
|
|
4258
|
-
// <- [
|
|
4267
|
+
// <- [🅱]
|
|
4259
4268
|
];
|
|
4260
4269
|
|
|
4261
4270
|
/**
|
|
@@ -4282,7 +4291,7 @@
|
|
|
4282
4291
|
'KNOWLEDGE',
|
|
4283
4292
|
'INSTRUMENT',
|
|
4284
4293
|
'ACTION',
|
|
4285
|
-
// <- [
|
|
4294
|
+
// <- [🅱]
|
|
4286
4295
|
],
|
|
4287
4296
|
/**
|
|
4288
4297
|
* Aliases for the BLOCK command
|
|
@@ -5748,7 +5757,7 @@
|
|
|
5748
5757
|
if (command.blockType === 'KNOWLEDGE') {
|
|
5749
5758
|
knowledgeCommandParser.applyToPipelineJson({
|
|
5750
5759
|
type: 'KNOWLEDGE',
|
|
5751
|
-
|
|
5760
|
+
sourceContent: content, // <- TODO: [🐝] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
|
|
5752
5761
|
}, {
|
|
5753
5762
|
pipelineJson: pipelineJson,
|
|
5754
5763
|
templateJson: templateJson,
|
|
@@ -6340,7 +6349,7 @@
|
|
|
6340
6349
|
}
|
|
6341
6350
|
else {
|
|
6342
6351
|
existing = collection.get(pipeline.pipelineUrl);
|
|
6343
|
-
throw new ReferenceError(spaceTrim__default["default"]("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
|
|
6352
|
+
throw new ReferenceError(spaceTrim__default["default"]("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4F\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
|
|
6344
6353
|
}
|
|
6345
6354
|
}
|
|
6346
6355
|
}
|
|
@@ -6502,6 +6511,7 @@
|
|
|
6502
6511
|
FilesStorage.prototype.getFilenameForKey = function (key) {
|
|
6503
6512
|
var name = titleToName(key);
|
|
6504
6513
|
var hash = sha256__default["default"](hexEncoder__default["default"].parse(name)).toString( /* hex */);
|
|
6514
|
+
// <- TODO: [🥬] Encapsulate sha256 to some private utility function
|
|
6505
6515
|
return path.join.apply(void 0, __spreadArray(__spreadArray([this.options.cacheFolderPath], __read(nameToSubfolderPath(hash /* <- TODO: [🎎] Maybe add some SHA256 prefix */)), false), ["".concat(name.substring(0, MAX_FILENAME_LENGTH), ".json")], false));
|
|
6506
6516
|
};
|
|
6507
6517
|
/**
|
|
@@ -6923,6 +6933,7 @@
|
|
|
6923
6933
|
* TODO: Maybe make custom OpenaiError
|
|
6924
6934
|
* TODO: [🧠][🈁] Maybe use `isDeterministic` from options
|
|
6925
6935
|
* TODO: [🍜] Auto use anonymous server in browser
|
|
6936
|
+
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
|
|
6926
6937
|
*/
|
|
6927
6938
|
|
|
6928
6939
|
/**
|
|
@@ -7276,6 +7287,255 @@
|
|
|
7276
7287
|
* TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
|
|
7277
7288
|
*/
|
|
7278
7289
|
|
|
7290
|
+
/**
|
|
7291
|
+
* Execution Tools for calling Azure OpenAI API.
|
|
7292
|
+
*
|
|
7293
|
+
* @public exported from `@promptbook/azure-openai`
|
|
7294
|
+
*/
|
|
7295
|
+
var AzureOpenAiExecutionTools = /** @class */ (function () {
|
|
7296
|
+
/**
|
|
7297
|
+
* Creates OpenAI Execution Tools.
|
|
7298
|
+
*
|
|
7299
|
+
* @param options which are relevant are directly passed to the OpenAI client
|
|
7300
|
+
*/
|
|
7301
|
+
function AzureOpenAiExecutionTools(options) {
|
|
7302
|
+
this.options = options;
|
|
7303
|
+
this.client = new openai.OpenAIClient("https://".concat(options.resourceName, ".openai.azure.com/"), new openai.AzureKeyCredential(options.apiKey));
|
|
7304
|
+
}
|
|
7305
|
+
Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
|
|
7306
|
+
get: function () {
|
|
7307
|
+
return 'Azure OpenAI';
|
|
7308
|
+
},
|
|
7309
|
+
enumerable: false,
|
|
7310
|
+
configurable: true
|
|
7311
|
+
});
|
|
7312
|
+
Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
|
|
7313
|
+
get: function () {
|
|
7314
|
+
return 'Use all models trained by OpenAI provided by Azure';
|
|
7315
|
+
},
|
|
7316
|
+
enumerable: false,
|
|
7317
|
+
configurable: true
|
|
7318
|
+
});
|
|
7319
|
+
/**
|
|
7320
|
+
* Calls OpenAI API to use a chat model.
|
|
7321
|
+
*/
|
|
7322
|
+
AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
|
|
7323
|
+
var _a, _b;
|
|
7324
|
+
return __awaiter(this, void 0, void 0, function () {
|
|
7325
|
+
var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
|
|
7326
|
+
var _c;
|
|
7327
|
+
return __generator(this, function (_d) {
|
|
7328
|
+
switch (_d.label) {
|
|
7329
|
+
case 0:
|
|
7330
|
+
if (this.options.isVerbose) {
|
|
7331
|
+
console.info('💬 OpenAI callChatModel call');
|
|
7332
|
+
}
|
|
7333
|
+
content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
|
|
7334
|
+
// TODO: [☂] Use here more modelRequirements
|
|
7335
|
+
if (modelRequirements.modelVariant !== 'CHAT') {
|
|
7336
|
+
throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
|
|
7337
|
+
}
|
|
7338
|
+
_d.label = 1;
|
|
7339
|
+
case 1:
|
|
7340
|
+
_d.trys.push([1, 3, , 4]);
|
|
7341
|
+
modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
|
|
7342
|
+
modelSettings = {
|
|
7343
|
+
maxTokens: modelRequirements.maxTokens,
|
|
7344
|
+
// <- TODO: [🌾] Make some global max cap for maxTokens
|
|
7345
|
+
temperature: modelRequirements.temperature,
|
|
7346
|
+
user: this.options.user,
|
|
7347
|
+
// <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
|
|
7348
|
+
// <- Note: [🧆]
|
|
7349
|
+
};
|
|
7350
|
+
rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
|
|
7351
|
+
messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
|
|
7352
|
+
? []
|
|
7353
|
+
: [
|
|
7354
|
+
{
|
|
7355
|
+
role: 'system',
|
|
7356
|
+
content: modelRequirements.systemMessage,
|
|
7357
|
+
},
|
|
7358
|
+
])), false), [
|
|
7359
|
+
{
|
|
7360
|
+
role: 'user',
|
|
7361
|
+
content: rawPromptContent,
|
|
7362
|
+
},
|
|
7363
|
+
], false);
|
|
7364
|
+
start = getCurrentIsoDate();
|
|
7365
|
+
complete = void 0;
|
|
7366
|
+
if (this.options.isVerbose) {
|
|
7367
|
+
console.info(colors__default["default"].bgWhite('messages'), JSON.stringify(messages, null, 4));
|
|
7368
|
+
}
|
|
7369
|
+
rawRequest = [modelName, messages, modelSettings];
|
|
7370
|
+
return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
|
|
7371
|
+
case 2:
|
|
7372
|
+
rawResponse = _d.sent();
|
|
7373
|
+
if (this.options.isVerbose) {
|
|
7374
|
+
console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
|
|
7375
|
+
}
|
|
7376
|
+
if (!rawResponse.choices[0]) {
|
|
7377
|
+
throw new PipelineExecutionError('No choises from Azure OpenAI');
|
|
7378
|
+
}
|
|
7379
|
+
if (rawResponse.choices.length > 1) {
|
|
7380
|
+
// TODO: This should be maybe only warning
|
|
7381
|
+
throw new PipelineExecutionError('More than one choise from Azure OpenAI');
|
|
7382
|
+
}
|
|
7383
|
+
if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
|
|
7384
|
+
throw new PipelineExecutionError('Empty response from Azure OpenAI');
|
|
7385
|
+
}
|
|
7386
|
+
resultContent = rawResponse.choices[0].message.content;
|
|
7387
|
+
// eslint-disable-next-line prefer-const
|
|
7388
|
+
complete = getCurrentIsoDate();
|
|
7389
|
+
usage = {
|
|
7390
|
+
price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
|
|
7391
|
+
input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
|
|
7392
|
+
output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
|
|
7393
|
+
};
|
|
7394
|
+
return [2 /*return*/, {
|
|
7395
|
+
content: resultContent,
|
|
7396
|
+
modelName: modelName,
|
|
7397
|
+
timing: {
|
|
7398
|
+
start: start,
|
|
7399
|
+
complete: complete,
|
|
7400
|
+
},
|
|
7401
|
+
usage: usage,
|
|
7402
|
+
rawPromptContent: rawPromptContent,
|
|
7403
|
+
rawRequest: rawRequest,
|
|
7404
|
+
rawResponse: rawResponse,
|
|
7405
|
+
// <- [🗯]
|
|
7406
|
+
}];
|
|
7407
|
+
case 3:
|
|
7408
|
+
error_1 = _d.sent();
|
|
7409
|
+
throw this.transformAzureError(error_1);
|
|
7410
|
+
case 4: return [2 /*return*/];
|
|
7411
|
+
}
|
|
7412
|
+
});
|
|
7413
|
+
});
|
|
7414
|
+
};
|
|
7415
|
+
/**
|
|
7416
|
+
* Calls Azure OpenAI API to use a complete model.
|
|
7417
|
+
*/
|
|
7418
|
+
AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
|
|
7419
|
+
var _a, _b;
|
|
7420
|
+
return __awaiter(this, void 0, void 0, function () {
|
|
7421
|
+
var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
|
|
7422
|
+
var _c;
|
|
7423
|
+
return __generator(this, function (_d) {
|
|
7424
|
+
switch (_d.label) {
|
|
7425
|
+
case 0:
|
|
7426
|
+
if (this.options.isVerbose) {
|
|
7427
|
+
console.info('🖋 OpenAI callCompletionModel call');
|
|
7428
|
+
}
|
|
7429
|
+
content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
|
|
7430
|
+
// TODO: [☂] Use here more modelRequirements
|
|
7431
|
+
if (modelRequirements.modelVariant !== 'COMPLETION') {
|
|
7432
|
+
throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
|
|
7433
|
+
}
|
|
7434
|
+
_d.label = 1;
|
|
7435
|
+
case 1:
|
|
7436
|
+
_d.trys.push([1, 3, , 4]);
|
|
7437
|
+
modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
|
|
7438
|
+
modelSettings = {
|
|
7439
|
+
maxTokens: modelRequirements.maxTokens || 2000,
|
|
7440
|
+
// <- TODO: [🌾] Make some global max cap for maxTokens
|
|
7441
|
+
temperature: modelRequirements.temperature,
|
|
7442
|
+
user: this.options.user,
|
|
7443
|
+
// <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
|
|
7444
|
+
// <- Note: [🧆]
|
|
7445
|
+
};
|
|
7446
|
+
start = getCurrentIsoDate();
|
|
7447
|
+
complete = void 0;
|
|
7448
|
+
if (this.options.isVerbose) {
|
|
7449
|
+
console.info(colors__default["default"].bgWhite('content'), JSON.stringify(content, null, 4));
|
|
7450
|
+
console.info(colors__default["default"].bgWhite('parameters'), JSON.stringify(parameters, null, 4));
|
|
7451
|
+
}
|
|
7452
|
+
rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
|
|
7453
|
+
rawRequest = [
|
|
7454
|
+
modelName,
|
|
7455
|
+
[rawPromptContent],
|
|
7456
|
+
modelSettings,
|
|
7457
|
+
];
|
|
7458
|
+
return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
|
|
7459
|
+
case 2:
|
|
7460
|
+
rawResponse = _d.sent();
|
|
7461
|
+
if (this.options.isVerbose) {
|
|
7462
|
+
console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
|
|
7463
|
+
}
|
|
7464
|
+
if (!rawResponse.choices[0]) {
|
|
7465
|
+
throw new PipelineExecutionError('No choises from OpenAI');
|
|
7466
|
+
}
|
|
7467
|
+
if (rawResponse.choices.length > 1) {
|
|
7468
|
+
// TODO: This should be maybe only warning
|
|
7469
|
+
throw new PipelineExecutionError('More than one choise from OpenAI');
|
|
7470
|
+
}
|
|
7471
|
+
resultContent = rawResponse.choices[0].text;
|
|
7472
|
+
// eslint-disable-next-line prefer-const
|
|
7473
|
+
complete = getCurrentIsoDate();
|
|
7474
|
+
usage = {
|
|
7475
|
+
price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
|
|
7476
|
+
input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
|
|
7477
|
+
output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
|
|
7478
|
+
};
|
|
7479
|
+
return [2 /*return*/, {
|
|
7480
|
+
content: resultContent,
|
|
7481
|
+
modelName: modelName,
|
|
7482
|
+
timing: {
|
|
7483
|
+
start: start,
|
|
7484
|
+
complete: complete,
|
|
7485
|
+
},
|
|
7486
|
+
usage: usage,
|
|
7487
|
+
rawPromptContent: rawPromptContent,
|
|
7488
|
+
rawRequest: rawRequest,
|
|
7489
|
+
rawResponse: rawResponse,
|
|
7490
|
+
// <- [🗯]
|
|
7491
|
+
}];
|
|
7492
|
+
case 3:
|
|
7493
|
+
error_2 = _d.sent();
|
|
7494
|
+
throw this.transformAzureError(error_2);
|
|
7495
|
+
case 4: return [2 /*return*/];
|
|
7496
|
+
}
|
|
7497
|
+
});
|
|
7498
|
+
});
|
|
7499
|
+
};
|
|
7500
|
+
// <- Note: [🤖] callXxxModel
|
|
7501
|
+
/**
 * Converts an Azure error (which is a plain object, not a proper `Error`) into a proper `Error`
 */
AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
    var isErrorLikeObject = typeof azureError === 'object' && azureError !== null;
    if (!isErrorLikeObject) {
        // Note: Azure sometimes throws non-object values; wrap them in a generic error
        return new PipelineExecutionError("Unknown Azure OpenAI error");
    }
    return new PipelineExecutionError("".concat(azureError.code, ": ").concat(azureError.message));
};
|
|
7511
|
+
/**
 * Lists all available Azure OpenAI models that can be used
 */
AzureOpenAiExecutionTools.prototype.listModels = function () {
    return __awaiter(this, void 0, void 0, function () {
        return __generator(this, function (_a) {
            // TODO: !!! Do here some filtering which models are really available as deployment
            // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
            return [2 /*return*/, OPENAI_MODELS.map(function (openAiModel) {
                    // Note: Azure exposes the same models as OpenAI, only the title is prefixed
                    return {
                        modelTitle: "Azure ".concat(openAiModel.modelTitle),
                        modelName: openAiModel.modelName,
                        modelVariant: openAiModel.modelVariant,
                    };
                })];
        });
    });
};
|
|
7530
|
+
return AzureOpenAiExecutionTools;
|
|
7531
|
+
}());
|
|
7532
|
+
/**
|
|
7533
|
+
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
|
|
7534
|
+
* TODO: Maybe make custom AzureOpenaiError
|
|
7535
|
+
* TODO: [🧠][🈁] Maybe use `isDeterministic` from options
|
|
7536
|
+
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
|
|
7537
|
+
*/
|
|
7538
|
+
|
|
7279
7539
|
/**
|
|
7280
7540
|
* Computes the usage of the OpenAI API based on the response from OpenAI
|
|
7281
7541
|
*
|
|
@@ -7612,12 +7872,56 @@
|
|
|
7612
7872
|
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
|
|
7613
7873
|
* TODO: Maybe make custom OpenaiError
|
|
7614
7874
|
* TODO: [🧠][🈁] Maybe use `isDeterministic` from options
|
|
7875
|
+
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
|
|
7876
|
+
*/
|
|
7877
|
+
|
|
7878
|
+
/**
 * @private internal registry of factory functions for `createLlmToolsFromConfiguration`
 */
var EXECUTION_TOOLS_CLASSES = {
    getOpenAiExecutionTools: function (options) {
        // TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools`
        var optionsWithBrowserAllowed = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
        return new OpenAiExecutionTools(optionsWithBrowserAllowed);
    },
    getAnthropicClaudeExecutionTools: function (options) {
        return new AnthropicClaudeExecutionTools(options);
    },
    getAzureOpenAiExecutionTools: function (options) {
        return new AzureOpenAiExecutionTools(options);
    },
    // <- Note: [🦑] Add here new LLM provider
};
|
|
7889
|
+
/**
|
|
7890
|
+
* TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
|
|
7891
|
+
* TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
|
|
7615
7892
|
*/
|
|
7616
7893
|
|
|
7617
7894
|
/**
 * Creates LLM execution tools from a list of provider configurations
 *
 * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
 *
 * @param configuration list of LLM provider configurations (`className` + provider-specific `options`)
 * @param options common options, e.g. `isVerbose` (default `false`), applied to every constructed tool
 * @returns multiple LLM execution tools joined together
 * @public exported from `@promptbook/node`
 */
function createLlmToolsFromConfiguration(configuration, options) {
    if (options === void 0) { options = {}; }
    if (!isRunningInNode()) {
        // Note: Fixed error message which wrongly mentioned `createLlmToolsFromEnv` (copy-paste leftover)
        throw new EnvironmentMismatchError('Function `createLlmToolsFromConfiguration` works only in Node.js environment');
    }
    var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
    dotenv__namespace.config();
    var llmTools = configuration.map(function (llmConfiguration) {
        // Note: The configuration `className` selects the matching factory, e.g. `OpenAiExecutionTools` -> `getOpenAiExecutionTools`
        return EXECUTION_TOOLS_CLASSES["get".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
    });
    return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
}
|
|
7914
|
+
/**
|
|
7915
|
+
* TODO: [🧠][🎌] Dynamically install required providers
|
|
7916
|
+
 * TODO: @@@ write discussion about this - wizard
|
|
7917
|
+
* TODO: [🧠][🍛] Which name is better `createLlmToolsFromConfig` or `createLlmToolsFromConfiguration`?
|
|
7918
|
+
 * TODO: [🧠] Is there some meaningful way to test this util
|
|
7919
|
+
 * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli`
|
|
7920
|
+
* TODO: This should be maybe not under `_common` but under `utils`
|
|
7921
|
+
*/
|
|
7922
|
+
|
|
7923
|
+
/**
|
|
7924
|
+
* @@@
|
|
7621
7925
|
*
|
|
7622
7926
|
* @@@ .env
|
|
7623
7927
|
*
|
|
@@ -7628,46 +7932,77 @@
|
|
|
7628
7932
|
* @returns @@@
|
|
7629
7933
|
* @public exported from `@promptbook/node`
|
|
7630
7934
|
*/
|
|
7631
|
-
function
|
|
7632
|
-
if (options === void 0) { options = {}; }
|
|
7935
|
+
/**
 * Reads LLM provider configuration from environment variables
 *
 * It looks for environment variables:
 * - `process.env.OPENAI_API_KEY`
 * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
 *
 * @returns list of LLM provider configurations found in the environment (may be empty)
 * @public exported from `@promptbook/node`
 */
function createLlmToolsFromConfigurationFromEnv() {
    if (!isRunningInNode()) {
        // Note: Fixed error message which wrongly mentioned `createLlmToolsFromEnv` (copy-paste leftover)
        throw new EnvironmentMismatchError('Function `createLlmToolsFromConfigurationFromEnv` works only in Node.js environment');
    }
    var llmToolsConfiguration = [];
    if (typeof process.env.OPENAI_API_KEY === 'string') {
        llmToolsConfiguration.push({
            title: 'OpenAI (from env)',
            packageName: '@promptbook/openai',
            className: 'OpenAiExecutionTools',
            options: {
                apiKey: process.env.OPENAI_API_KEY,
            },
        });
    }
    if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
        llmToolsConfiguration.push({
            title: 'Claude (from env)',
            // Note: Fixed typo in package name ('antrhopic' -> 'anthropic')
            packageName: '@promptbook/anthropic-claude',
            className: 'AnthropicClaudeExecutionTools',
            options: {
                apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
            },
        });
    }
    // <- Note: [🦑] Add here new LLM provider
    return llmToolsConfiguration;
}
|
|
7963
|
+
/**
|
|
7964
|
+
* TODO: Add Azure OpenAI
|
|
7965
|
+
* TODO: [🧠][🍛]
|
|
7966
|
+
 * TODO: [🧠] Is there some meaningful way to test this util
|
|
7967
|
+
 * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli`
|
|
7968
|
+
* TODO: [👷♂️] @@@ Manual about construction of llmTools
|
|
7969
|
+
* TODO: This should be maybe not under `_common` but under `utils`
|
|
7970
|
+
* TODO: [🧠] Maybe pass env as argument
|
|
7971
|
+
*/
|
|
7972
|
+
|
|
7973
|
+
/**
 * Creates LLM execution tools from environment variables
 *
 * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
 *
 * It looks for environment variables:
 * - `process.env.OPENAI_API_KEY`
 * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
 *
 * @returns multiple LLM execution tools joined together
 * @public exported from `@promptbook/node`
 */
function createLlmToolsFromEnv(options) {
    if (options === void 0) { options = {}; }
    if (!isRunningInNode()) {
        throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
    }
    var llmToolsConfiguration = createLlmToolsFromConfigurationFromEnv();
    if (llmToolsConfiguration.length > 0) {
        return createLlmToolsFromConfiguration(llmToolsConfiguration, options);
    }
    // TODO: [🥃]
    throw new Error(spaceTrim__default["default"]("\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
}
|
|
7661
7999
|
/**
|
|
7662
|
-
* TODO:
|
|
7663
|
-
* TODO:
|
|
7664
|
-
* TODO: Add Azure
|
|
7665
|
-
* TODO: [🧠] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
|
|
8000
|
+
* TODO: @@@ write `createLlmToolsFromEnv` vs `createLlmToolsFromConfigurationFromEnv` vs `createLlmToolsFromConfiguration`
|
|
8001
|
+
* TODO: [🧠][🍛] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
|
|
7666
8002
|
 * TODO: [🧠] Is there some meaningful way to test this util
|
|
7667
|
-
* TODO: [🧠] Maybe pass env as argument
|
|
7668
8003
|
 * Note: [🟢] This code should never be published outside of `@promptbook/node` and `@promptbook/cli`
|
|
7669
|
-
* TODO: [👷♂️] @@@ Manual about construction of llmTools
|
|
7670
8004
|
* TODO: [🥃] Allow `ptbk make` without llm tools
|
|
8005
|
+
* TODO: This should be maybe not under `_common` but under `utils`
|
|
7671
8006
|
*/
|
|
7672
8007
|
|
|
7673
8008
|
/**
|
|
@@ -7864,6 +8199,7 @@
|
|
|
7864
8199
|
* Note: [🟡] This code should never be published outside of `@promptbook/cli`
|
|
7865
8200
|
* TODO: [👷♂️] @@@ Manual about construction of llmTools
|
|
7866
8201
|
* TODO: [🥃] Allow `ptbk make` without llm tools
|
|
8202
|
+
* TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
|
|
7867
8203
|
*/
|
|
7868
8204
|
|
|
7869
8205
|
/**
|