@promptbook/cli 0.66.0-6 → 0.66.0-7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/promptbook-cli.js +2 -2
- package/esm/index.es.js +318 -169
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/cli.index.d.ts +2 -2
- package/esm/typings/src/cli/main.d.ts +2 -2
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -0
- package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.test.d.ts +1 -1
- package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +1 -1
- package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.test.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +10 -5
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +10 -5
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +8 -4
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +8 -4
- package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -5
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +10 -5
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +8 -4
- package/esm/typings/src/personas/preparePersona.test.d.ts +1 -1
- package/package.json +1 -1
- package/umd/index.umd.js +318 -169
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED
@@ -39,7 +39,7 @@
     /**
      * The version of the Promptbook library
      */
-    var PROMPTBOOK_VERSION = '0.66.0-
+    var PROMPTBOOK_VERSION = '0.66.0-6';
     // TODO: !!!! List here all the versions and annotate + put into script
 
     /*! *****************************************************************************
@@ -866,7 +866,7 @@
     });
     }
 
-    var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-
+    var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
     /**
      * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2089,6 +2089,60 @@
         enumerable: false,
         configurable: true
     });
+    /**
+     * Check the configuration of all execution tools
+     */
+    MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                return [2 /*return*/];
+            });
+        });
+    };
+    /**
+     * List all available models that can be used
+     * This lists is a combination of all available models from all execution tools
+     */
+    MultipleLlmExecutionTools.prototype.listModels = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
+            var e_1, _c;
+            return __generator(this, function (_d) {
+                switch (_d.label) {
+                    case 0:
+                        availableModels = [];
+                        _d.label = 1;
+                    case 1:
+                        _d.trys.push([1, 6, 7, 8]);
+                        _a = __values(this.llmExecutionTools), _b = _a.next();
+                        _d.label = 2;
+                    case 2:
+                        if (!!_b.done) return [3 /*break*/, 5];
+                        llmExecutionTools = _b.value;
+                        return [4 /*yield*/, llmExecutionTools.listModels()];
+                    case 3:
+                        models = _d.sent();
+                        availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
+                        _d.label = 4;
+                    case 4:
+                        _b = _a.next();
+                        return [3 /*break*/, 2];
+                    case 5: return [3 /*break*/, 8];
+                    case 6:
+                        e_1_1 = _d.sent();
+                        e_1 = { error: e_1_1 };
+                        return [3 /*break*/, 8];
+                    case 7:
+                        try {
+                            if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+                        }
+                        finally { if (e_1) throw e_1.error; }
+                        return [7 /*endfinally*/];
+                    case 8: return [2 /*return*/, availableModels];
+                }
+            });
+        });
+    };
     /**
      * Calls the best available chat model
      */
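De-transpiled, the new `listModels` is a plain aggregation loop over the wrapped tools. A minimal TypeScript sketch of what the source plausibly looks like; the `AvailableModel` and `LlmExecutionTools` shapes here are simplified stand-ins, not the package's full typings:

```ts
// Simplified shapes for illustration; the real typings live in @promptbook/types.
interface AvailableModel {
    modelTitle: string;
    modelName: string;
    modelVariant: 'CHAT' | 'COMPLETION' | 'EMBEDDING';
}

interface LlmExecutionTools {
    checkConfiguration(): Promise<void>;
    listModels(): Promise<AvailableModel[]> | AvailableModel[];
}

class MultipleLlmExecutionTools {
    public constructor(private readonly llmExecutionTools: LlmExecutionTools[]) {}

    // Check the configuration of all execution tools
    // (a no-op placeholder in this build)
    public async checkConfiguration(): Promise<void> {}

    // Concatenate the models reported by every wrapped tool
    public async listModels(): Promise<AvailableModel[]> {
        const availableModels: AvailableModel[] = [];
        for (const tools of this.llmExecutionTools) {
            const models = await tools.listModels();
            availableModels.push(...models);
        }
        return availableModels;
    }
}
```

The `checkConfiguration` added here is a no-op; the per-provider classes further down do the real validation.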
@@ -2115,8 +2169,8 @@
      */
     MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var errors, _a, _b, llmExecutionTools, _c, error_1,
-            var
+            var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
+            var e_2, _d;
             var _this = this;
             return __generator(this, function (_e) {
                 switch (_e.label) {
@@ -2172,14 +2226,14 @@
                         return [3 /*break*/, 2];
                     case 14: return [3 /*break*/, 17];
                     case 15:
-
-
+                        e_2_1 = _e.sent();
+                        e_2 = { error: e_2_1 };
                         return [3 /*break*/, 17];
                     case 16:
                         try {
                             if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
                         }
-                        finally { if (
+                        finally { if (e_2) throw e_2.error; }
                         return [7 /*endfinally*/];
                     case 17:
                         if (errors.length === 1) {
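Only the renamed iterator temporaries (`e_2`, `e_2_1`) of `callCommonModel` are visible in this hunk, but the surrounding loop tries each wrapped tool in order and collects failures before giving up. A hedged sketch of that try-each-provider pattern; `callWithFallback` and `callModel` are invented names for illustration, not the package's API:

```ts
// Sketch of the try-each-provider loop; `callModel` stands in for the
// variant-specific call the real method dispatches to.
async function callWithFallback<TResult>(
    providers: ReadonlyArray<{ title: string; callModel(): Promise<TResult> }>,
): Promise<TResult> {
    const errors: Error[] = [];
    for (const provider of providers) {
        try {
            return await provider.callModel();
        } catch (error) {
            // Remember the failure and fall through to the next provider
            errors.push(error instanceof Error ? error : new Error(String(error)));
        }
    }
    if (errors.length === 1) {
        throw errors[0];
    }
    throw new Error(
        `All ${providers.length} providers failed:\n${errors.map((e) => e.message).join('\n')}`,
    );
}
```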
@@ -2207,50 +2261,6 @@
             });
         });
     };
-    /**
-     * List all available models that can be used
-     * This lists is a combination of all available models from all execution tools
-     */
-    MultipleLlmExecutionTools.prototype.listModels = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
-            var e_2, _c;
-            return __generator(this, function (_d) {
-                switch (_d.label) {
-                    case 0:
-                        availableModels = [];
-                        _d.label = 1;
-                    case 1:
-                        _d.trys.push([1, 6, 7, 8]);
-                        _a = __values(this.llmExecutionTools), _b = _a.next();
-                        _d.label = 2;
-                    case 2:
-                        if (!!_b.done) return [3 /*break*/, 5];
-                        llmExecutionTools = _b.value;
-                        return [4 /*yield*/, llmExecutionTools.listModels()];
-                    case 3:
-                        models = _d.sent();
-                        availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
-                        _d.label = 4;
-                    case 4:
-                        _b = _a.next();
-                        return [3 /*break*/, 2];
-                    case 5: return [3 /*break*/, 8];
-                    case 6:
-                        e_2_1 = _d.sent();
-                        e_2 = { error: e_2_1 };
-                        return [3 /*break*/, 8];
-                    case 7:
-                        try {
-                            if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
-                        }
-                        finally { if (e_2) throw e_2.error; }
-                        return [7 /*endfinally*/];
-                    case 8: return [2 /*return*/, availableModels];
-                }
-            });
-        });
-    };
     return MultipleLlmExecutionTools;
 }());
 /**
@@ -3685,6 +3695,13 @@
             // TODO: [🧠] Maybe put here some suffix
             return llmTools.description;
         },
+        checkConfiguration: function () {
+            return __awaiter(this, void 0, void 0, function () {
+                return __generator(this, function (_a) {
+                    return [2 /*return*/, /* not await */ llmTools.checkConfiguration()];
+                });
+            });
+        },
         listModels: function () {
             return /* not await */ llmTools.listModels();
         },
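This hunk patches the usage-counting wrapper so it forwards the new `checkConfiguration` the same way it already forwards `listModels`. In source form the delegation is roughly the following sketch, over a trimmed-down interface:

```ts
type LlmToolsLike = {
    checkConfiguration(): Promise<void>;
    listModels(): PromiseLike<unknown> | unknown;
};

// Sketch: forward both introspection methods to the wrapped tools
// without awaiting, so the original promise is passed straight through.
function forwardIntrospection(llmTools: LlmToolsLike) {
    return {
        checkConfiguration: () => /* not await */ llmTools.checkConfiguration(),
        listModels: () => /* not await */ llmTools.listModels(),
    };
}
```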
@@ -6632,6 +6649,29 @@
         enumerable: false,
         configurable: true
     });
+    /**
+     * Check the configuration of all execution tools
+     */
+    RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                return [2 /*return*/];
+            });
+        });
+    };
+    /**
+     * List all available models that can be used
+     */
+    RemoteLlmExecutionTools.prototype.listModels = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                return [2 /*return*/, (this.options.models ||
+                        [
+                        /* !!!!!! */
+                        ])];
+            });
+        });
+    };
     /**
      * Creates a connection to the remote proxy server.
      */
@@ -6726,19 +6766,6 @@
             });
         });
     };
-    /**
-     * List all available models that can be used
-     */
-    RemoteLlmExecutionTools.prototype.listModels = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                return [2 /*return*/, (this.options.models ||
-                        [
-                        /* !!! */
-                        ])];
-            });
-        });
-    };
     return RemoteLlmExecutionTools;
 }());
 /**
@@ -6935,12 +6962,10 @@
     function AnthropicClaudeExecutionTools(options) {
         if (options === void 0) { options = { isProxied: false }; }
         this.options = options;
-
-
-
-
-        this.client = new Anthropic__default["default"](anthropicOptions);
-        // <- TODO: !!!!!! Lazy-load client
+        /**
+         * Anthropic Claude API client.
+         */
+        this.client = null;
     }
     Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
         get: function () {
@@ -6956,12 +6981,47 @@
         enumerable: false,
         configurable: true
     });
+    AnthropicClaudeExecutionTools.prototype.getClient = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            var anthropicOptions;
+            return __generator(this, function (_a) {
+                if (this.client === null) {
+                    anthropicOptions = __assign({}, this.options);
+                    delete anthropicOptions.isVerbose;
+                    delete anthropicOptions.isProxied;
+                    this.client = new Anthropic__default["default"](anthropicOptions);
+                }
+                return [2 /*return*/, this.client];
+            });
+        });
+    };
+    /**
+     * Check the `options` passed to `constructor`
+     */
+    AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                switch (_a.label) {
+                    case 0: return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        _a.sent();
+                        return [2 /*return*/];
+                }
+            });
+        });
+    };
+    /**
+     * List all available Anthropic Claude models that can be used
+     */
+    AnthropicClaudeExecutionTools.prototype.listModels = function () {
+        return ANTHROPIC_CLAUDE_MODELS;
+    };
     /**
      * Calls Anthropic Claude API to use a chat model.
      */
     AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
+            var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
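This resolves the `Lazy-load client` TODO removed in the previous hunk: the constructor now stores `null` and the SDK client is built on first use, so merely constructing the tools no longer needs a usable API key. A minimal sketch of the pattern using the real `@anthropic-ai/sdk` default export; the options type here is a simplified assumption, not the package's full typing:

```ts
import Anthropic from '@anthropic-ai/sdk';

// Simplified options shape for illustration
type AnthropicClaudeExecutionToolsOptions = {
    apiKey?: string;
    isVerbose?: boolean;
    isProxied?: boolean;
};

class AnthropicClaudeExecutionTools {
    // Anthropic Claude API client, created lazily on first use
    private client: Anthropic | null = null;

    public constructor(
        private readonly options: AnthropicClaudeExecutionToolsOptions = { isProxied: false },
    ) {}

    private async getClient(): Promise<Anthropic> {
        if (this.client === null) {
            // Strip Promptbook-only options before handing the rest to the SDK
            const { isVerbose, isProxied, ...anthropicOptions } = this.options;
            this.client = new Anthropic(anthropicOptions);
        }
        return this.client;
    }

    // Check the `options` passed to `constructor`:
    // constructing the client is what validates them
    public async checkConfiguration(): Promise<void> {
        await this.getClient();
    }
}
```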
@@ -6969,6 +7029,9 @@
                             console.info('💬 Anthropic Claude callChatModel call');
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _a.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'CHAT') {
                             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -6995,8 +7058,8 @@
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
                         }
-                        return [4 /*yield*/,
-                        case
+                        return [4 /*yield*/, client.messages.create(rawRequest)];
+                    case 2:
                         rawResponse = _a.sent();
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -7127,13 +7190,6 @@
     AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
         return this.getDefaultModel('claude-3-opus');
     };
-    // <- Note: [🤖] getDefaultXxxModel
-    /**
-     * List all available Anthropic Claude models that can be used
-     */
-    AnthropicClaudeExecutionTools.prototype.listModels = function () {
-        return ANTHROPIC_CLAUDE_MODELS;
-    };
     return AnthropicClaudeExecutionTools;
 }());
 /**
@@ -7540,10 +7596,10 @@
      */
     function AzureOpenAiExecutionTools(options) {
         this.options = options;
-
-
-
-
+        /**
+         * OpenAI Azure API client.
+         */
+        this.client = null;
     }
     Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
         get: function () {
@@ -7559,28 +7615,74 @@
         enumerable: false,
         configurable: true
     });
+    AzureOpenAiExecutionTools.prototype.getClient = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                if (this.client === null) {
+                    this.client = new openai.OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new openai.AzureKeyCredential(this.options.apiKey));
+                }
+                return [2 /*return*/, this.client];
+            });
+        });
+    };
+    /**
+     * Check the `options` passed to `constructor`
+     */
+    AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                switch (_a.label) {
+                    case 0: return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        _a.sent();
+                        return [2 /*return*/];
+                }
+            });
+        });
+    };
+    /**
+     * List all available Azure OpenAI models that can be used
+     */
+    AzureOpenAiExecutionTools.prototype.listModels = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                // TODO: !!! Do here some filtering which models are really available as deployment
+                // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
+                return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
+                        var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
+                        return ({
+                            modelTitle: "Azure ".concat(modelTitle),
+                            modelName: modelName,
+                            modelVariant: modelVariant,
+                        });
+                    })];
+            });
+        });
+    };
     /**
      * Calls OpenAI API to use a chat model.
      */
     AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
         var _a, _b;
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
-
-
-            switch (_d.label) {
+            var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
+            return __generator(this, function (_c) {
+                switch (_c.label) {
                     case 0:
                         if (this.options.isVerbose) {
                             console.info('💬 OpenAI callChatModel call');
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _c.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'CHAT') {
                             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
                         }
-
-                        case
-
+                        _c.label = 2;
+                    case 2:
+                        _c.trys.push([2, 4, , 5]);
                         modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
                         modelSettings = {
                             maxTokens: modelRequirements.maxTokens,
@@ -7610,9 +7712,9 @@
                             console.info(colors__default["default"].bgWhite('messages'), JSON.stringify(messages, null, 4));
                         }
                         rawRequest = [modelName, messages, modelSettings];
-                        return [4 /*yield*/,
-                        case
-                        rawResponse =
+                        return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
+                    case 3:
+                        rawResponse = _c.sent();
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
                         }
@@ -7647,10 +7749,10 @@
                             rawResponse: rawResponse,
                             // <- [🗯]
                         }];
-                        case
-                        error_1 =
+                    case 4:
+                        error_1 = _c.sent();
                         throw this.transformAzureError(error_1);
-                        case
+                    case 5: return [2 /*return*/];
                 }
             });
         });
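De-transpiled, the Azure chat path now reads roughly as follows. This is a sketch using the real `@azure/openai` client API; `callAzureChat` is an invented helper name, error transformation and settings are trimmed, and the `maxTokens` value is illustrative:

```ts
import { AzureKeyCredential, OpenAIClient } from '@azure/openai';

// Sketch of the de-transpiled Azure chat path; `resourceName`, `apiKey` and
// `deploymentName` come from the tools' constructor options.
async function callAzureChat(
    options: { resourceName: string; apiKey: string; deploymentName: string },
    promptContent: string,
): Promise<string | undefined> {
    // Lazily constructed in the real code via getClient()
    const client = new OpenAIClient(
        `https://${options.resourceName}.openai.azure.com/`,
        new AzureKeyCredential(options.apiKey),
    );
    // getChatCompletions(deploymentName, messages, settings) corresponds to
    // the `rawRequest` tuple that the transpiled code spreads into the call.
    const rawResponse = await client.getChatCompletions(
        options.deploymentName,
        [{ role: 'user', content: promptContent }],
        { maxTokens: 2000 }, // <- illustrative setting
    );
    return rawResponse.choices[0]?.message?.content ?? undefined;
}
```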
@@ -7661,22 +7763,24 @@
     AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
         var _a, _b;
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
-
-
-            switch (_d.label) {
+            var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
+            return __generator(this, function (_c) {
+                switch (_c.label) {
                     case 0:
                         if (this.options.isVerbose) {
                             console.info('🖋 OpenAI callCompletionModel call');
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _c.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'COMPLETION') {
                             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
                         }
-
-                        case
-
+                        _c.label = 2;
+                    case 2:
+                        _c.trys.push([2, 4, , 5]);
                         modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
                         modelSettings = {
                             maxTokens: modelRequirements.maxTokens || 2000,
@@ -7698,9 +7802,9 @@
                             [rawPromptContent],
                             modelSettings,
                         ];
-                        return [4 /*yield*/,
-                        case
-                        rawResponse =
+                        return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
+                    case 3:
+                        rawResponse = _c.sent();
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
                         }
@@ -7732,10 +7836,10 @@
                             rawResponse: rawResponse,
                             // <- [🗯]
                         }];
-                        case
-                        error_2 =
+                    case 4:
+                        error_2 = _c.sent();
                         throw this.transformAzureError(error_2);
-                        case
+                    case 5: return [2 /*return*/];
                 }
             });
         });
@@ -7751,25 +7855,6 @@
         var code = azureError.code, message = azureError.message;
         return new PipelineExecutionError("".concat(code, ": ").concat(message));
     };
-    /**
-     * List all available Azure OpenAI models that can be used
-     */
-    AzureOpenAiExecutionTools.prototype.listModels = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                // TODO: !!! Do here some filtering which models are really available as deployment
-                // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
-                return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
-                        var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
-                        return ({
-                            modelTitle: "Azure ".concat(modelTitle),
-                            modelName: modelName,
-                            modelVariant: modelVariant,
-                        });
-                    })];
-            });
-        });
-    };
     return AzureOpenAiExecutionTools;
 }());
 /**
@@ -7831,12 +7916,10 @@
     function OpenAiExecutionTools(options) {
         if (options === void 0) { options = {}; }
         this.options = options;
-
-
-
-
-        this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
-        // <- TODO: !!!!!! Lazy-load client
+        /**
+         * OpenAI API client.
+         */
+        this.client = null;
     }
     Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
         get: function () {
@@ -7852,12 +7935,54 @@
         enumerable: false,
         configurable: true
     });
+    OpenAiExecutionTools.prototype.getClient = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            var openAiOptions;
+            return __generator(this, function (_a) {
+                if (this.client === null) {
+                    openAiOptions = __assign({}, this.options);
+                    delete openAiOptions.isVerbose;
+                    delete openAiOptions.user;
+                    this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
+                }
+                return [2 /*return*/, this.client];
+            });
+        });
+    };
+    /**
+     * Check the `options` passed to `constructor`
+     */
+    OpenAiExecutionTools.prototype.checkConfiguration = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                switch (_a.label) {
+                    case 0: return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        _a.sent();
+                        return [2 /*return*/];
+                }
+            });
+        });
+    };
+    /**
+     * List all available OpenAI models that can be used
+     */
+    OpenAiExecutionTools.prototype.listModels = function () {
+        /*
+        Note: Dynamic lising of the models
+        const models = await this.openai.models.list({});
+
+        console.log({ models });
+        console.log(models.data);
+        */
+        return OPENAI_MODELS;
+    };
     /**
      * Calls OpenAI API to use a chat model.
      */
     OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
@@ -7865,6 +7990,9 @@
                             console.info('💬 OpenAI callChatModel call', { prompt: prompt });
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _a.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'CHAT') {
                             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -7901,8 +8029,8 @@
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
                         }
-                        return [4 /*yield*/,
-                        case
+                        return [4 /*yield*/, client.chat.completions.create(rawRequest)];
+                    case 2:
                         rawResponse = _a.sent();
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
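The OpenAI path is analogous: the lazily created client issues `chat.completions.create`. A hypothetical minimal call with the real `openai` SDK (v4 style); the helper name and model are illustrative:

```ts
import OpenAI from 'openai';

// Sketch: the `rawRequest` object the transpiled code passes to
// client.chat.completions.create(), reduced to its essentials.
async function callOpenAiChat(apiKey: string, promptContent: string): Promise<string | null> {
    const client = new OpenAI({ apiKey }); // created lazily via getClient() in the real code
    const rawResponse = await client.chat.completions.create({
        model: 'gpt-4-turbo', // <- illustrative model name
        messages: [{ role: 'user', content: promptContent }],
    });
    return rawResponse.choices[0]?.message.content ?? null;
}
```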
@@ -7943,7 +8071,7 @@
      */
     OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
@@ -7951,6 +8079,9 @@
                             console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _a.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'COMPLETION') {
                             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
@@ -7970,8 +8101,8 @@
                        if (this.options.isVerbose) {
                            console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
                        }
-                        return [4 /*yield*/,
-                        case
+                        return [4 /*yield*/, client.completions.create(rawRequest)];
+                    case 2:
                        rawResponse = _a.sent();
                        if (this.options.isVerbose) {
                            console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -8009,7 +8140,7 @@
      */
     OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
@@ -8017,6 +8148,9 @@
                             console.info('🖋 OpenAI embedding call', { prompt: prompt });
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _a.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'EMBEDDING') {
                             throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
@@ -8031,8 +8165,8 @@
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
                         }
-                        return [4 /*yield*/,
-                        case
+                        return [4 /*yield*/, client.embeddings.create(rawRequest)];
+                    case 2:
                         rawResponse = _a.sent();
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -8098,20 +8232,6 @@
     OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
         return this.getDefaultModel('text-embedding-3-large');
     };
-    // <- Note: [🤖] getDefaultXxxModel
-    /**
-     * List all available OpenAI models that can be used
-     */
-    OpenAiExecutionTools.prototype.listModels = function () {
-        /*
-        Note: Dynamic lising of the models
-        const models = await this.openai.models.list({});
-
-        console.log({ models });
-        console.log(models.data);
-        */
-        return OPENAI_MODELS;
-    };
     return OpenAiExecutionTools;
 }());
 /**
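With every provider now exposing `checkConfiguration`, callers can validate credentials up front instead of failing on the first prompt. A hypothetical usage snippet; the constructor options shown are illustrative:

```ts
// Hypothetical usage: fail fast before running a whole pipeline.
const tools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });
await tools.checkConfiguration(); // constructs the client; throws on invalid options
const models = tools.listModels(); // synchronous for OpenAI: the static OPENAI_MODELS list
```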
@@ -8190,6 +8310,20 @@
  * TODO: This should be maybe not under `_common` but under `utils`
  */
 
+/**
+ * @@@
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it access global
+ *
+ * @public exported from `@promptbook/utils`
+ */
+function $getGlobalScope() {
+    return Function('return this')();
+}
+/***
+ * TODO: !!!!! Make private and promptbook registry from this
+ */
+
 /**
  * Register is @@@
  *
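The `Function('return this')()` trick returns the real global object (`window`, `global`, or `globalThis`) in any runtime, even inside strict-mode modules where top-level `this` is `undefined`; that is what lets the register below survive multiple bundled copies of the library. The same helper with types, as a sketch:

```ts
// Returns the global scope object whatever the runtime is.
// Note: `$` marks the function as impure - it reaches into global state.
function $getGlobalScope(): Record<string, unknown> {
    // `Function('return this')()` evaluates in sloppy mode, where `this`
    // at the top level is the global object (equivalent to `globalThis`).
    return Function('return this')();
}

// Sketch of how the register's backing array is anchored on the global
// scope so that duplicate bundled copies of the library share one array:
const globalScope = $getGlobalScope();
if (globalScope.$llmToolsMetadataRegister === undefined) {
    globalScope.$llmToolsMetadataRegister = [];
}
```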
@@ -8204,13 +8338,30 @@
         return this.storage;
     };
     Register.prototype.register = function (registered) {
-        //
-
-        this.storage.
+        // <- TODO: What to return here
+        var packageName = registered.packageName, className = registered.className;
+        var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
+        var existingRegistration = this.storage[existingRegistrationIndex];
+        if (existingRegistration) {
+            console.warn("!!!!!! Re-registering ".concat(packageName, ".").concat(className, " again"));
+            this.storage[existingRegistrationIndex] = registered;
+        }
+        else {
+            this.storage.push(registered);
+        }
     };
     return Register;
 }());
 
+// TODO: !!!!!! Move this logic to Register and rename to $Register
+var globalScope = $getGlobalScope();
+if (globalScope.$llmToolsMetadataRegister === undefined) {
+    globalScope.$llmToolsMetadataRegister = [];
+}
+else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
+    throw new UnexpectedError("Expected $llmToolsMetadataRegister to be an array, but got ".concat(typeof globalScope.$llmToolsMetadataRegister));
+}
+var _ = globalScope.$llmToolsMetadataRegister;
 /**
  * @@@
  *
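Taken together, the two hunks above make the metadata register idempotent across duplicate copies of the library: the backing array lives on the global scope, and `register` replaces an existing `(packageName, className)` entry instead of appending a duplicate. A condensed TypeScript sketch of that behaviour, with simplified types:

```ts
type Registration = { packageName: string; className: string };

class Register<TRegistered extends Registration> {
    // `storage` is the globally shared array, so duplicate bundled copies
    // of the library all see the same registrations.
    public constructor(private readonly storage: TRegistered[]) {}

    public list(): ReadonlyArray<TRegistered> {
        return this.storage;
    }

    public register(registered: TRegistered): void {
        const { packageName, className } = registered;
        const existingIndex = this.storage.findIndex(
            (item) => item.packageName === packageName && item.className === className,
        );
        if (existingIndex !== -1) {
            // Replace the existing entry instead of appending a duplicate
            console.warn(`Re-registering ${packageName}.${className} again`);
            this.storage[existingIndex] = registered;
        } else {
            this.storage.push(registered);
        }
    }
}
```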
@@ -8218,9 +8369,8 @@
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
-var $llmToolsMetadataRegister = new Register(
-
-]);
+var $llmToolsMetadataRegister = new Register(_);
+$getGlobalScope().$llmToolsMetadataRegister;
 
 /**
  * @@@
@@ -8277,7 +8427,7 @@
     var configuration = createLlmToolsFromConfigurationFromEnv();
     if (configuration.length === 0) {
         // TODO: [🥃]
-        throw new Error(spaceTrim__default["default"]("\n No LLM tools found in the environment\n\n !!!!!!!@@@@You have maybe forgotten to two things:\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
+        throw new Error(spaceTrim__default["default"]("\n No LLM tools found in the environment\n\n !!!!!!!@@@@You have maybe forgotten to two things:\n !!!!!!! List all available LLM tools in your environment\n - Azure \n - OpenAI (not imported)\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
     }
     return createLlmToolsFromConfiguration(configuration, options);
 }
@@ -8495,7 +8645,6 @@
  * TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
  */
 
-// TODO: !!!!!! Probbably all LLM tools should be registered in `@promptbook/cli`
 /**
  * Initializes `make` command for Promptbook CLI utilities
  *
@@ -9007,9 +9156,9 @@
  *
  * @public exported from `@promptbook/cli`
  */
-var
+var _CLI = {
     // Note: [🥠]
-
+    _initialize: promptbookCli,
 };
 /**
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
@@ -9094,8 +9243,8 @@
 
 exports.PROMPTBOOK_VERSION = PROMPTBOOK_VERSION;
 exports._AnthropicClaudeMetadataRegistration = _AnthropicClaudeMetadataRegistration;
+exports._CLI = _CLI;
 exports._OpenAiMetadataRegistration = _OpenAiMetadataRegistration;
-exports.__CLI = __CLI;
 
 Object.defineProperty(exports, '__esModule', { value: true });