@promptbook/core 0.66.0-8 → 0.66.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +174 -1706
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/anthropic-claude.index.d.ts +0 -2
- package/esm/typings/src/_packages/cli.index.d.ts +6 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +22 -14
- package/esm/typings/src/_packages/utils.index.d.ts +7 -7
- package/esm/typings/src/config.d.ts +6 -0
- package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/$llmToolsMetadataRegister.d.ts +2 -2
- package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +2 -2
- package/esm/typings/src/llm-providers/_common/$registeredLlmToolsMessage.d.ts +9 -0
- package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/LlmToolsMetadata.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +1 -0
- package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -0
- package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -0
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +1 -0
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
- package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Error.d.ts → PromptbookServer_Error.d.ts} +1 -1
- package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +34 -0
- package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Response.d.ts +15 -0
- package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Progress.d.ts → PromptbookServer_Prompt_Progress.d.ts} +1 -1
- package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Request.d.ts → PromptbookServer_Prompt_Request.d.ts} +15 -3
- package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Response.d.ts → PromptbookServer_Prompt_Response.d.ts} +1 -1
- package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +1 -7
- package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
- package/esm/typings/src/utils/{Register.d.ts → $Register.d.ts} +6 -2
- package/esm/typings/src/utils/environment/{getGlobalScope.d.ts → $getGlobalScope.d.ts} +1 -1
- package/esm/typings/src/utils/organization/f.d.ts +6 -0
- package/package.json +1 -6
- package/umd/index.umd.js +178 -1707
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/_common/config.d.ts +0 -14
- package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +0 -4
- /package/esm/typings/src/llm-providers/mocked/{fakeTextToExpectations.d.ts → $fakeTextToExpectations.d.ts} +0 -0
- /package/esm/typings/src/utils/{currentDate.d.ts → $currentDate.d.ts} +0 -0
- /package/esm/typings/src/utils/environment/{isRunningInBrowser.d.ts → $isRunningInBrowser.d.ts} +0 -0
- /package/esm/typings/src/utils/environment/{isRunningInNode.d.ts → $isRunningInNode.d.ts} +0 -0
- /package/esm/typings/src/utils/environment/{isRunningInWebWorker.d.ts → $isRunningInWebWorker.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{isDirectoryExisting.d.ts → $isDirectoryExisting.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{isDirectoryExisting.test.d.ts → $isDirectoryExisting.test.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{isFileExisting.d.ts → $isFileExisting.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{isFileExisting.test.d.ts → $isFileExisting.test.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{listAllFiles.d.ts → $listAllFiles.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{listAllFiles.test.d.ts → $listAllFiles.test.d.ts} +0 -0
- /package/esm/typings/src/utils/random/{randomSeed.d.ts → $randomSeed.d.ts} +0 -0
package/esm/index.es.js
CHANGED
@@ -3,18 +3,13 @@ import { format } from 'prettier';
 import parserHtml from 'prettier/parser-html';
 import hexEncoder from 'crypto-js/enc-hex';
 import sha256 from 'crypto-js/sha256';
-import { io } from 'socket.io-client';
-import Anthropic from '@anthropic-ai/sdk';
-import colors from 'colors';
-import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
-import OpenAI from 'openai';
 import moment from 'moment';
 
 // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.66.0-8';
+var PROMPTBOOK_VERSION = '0.66.0-9';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /*! *****************************************************************************
@@ -595,6 +590,13 @@ var DEFAULT_REMOTE_URL = 'https://api.pavolhejny.com/';
  * @public exported from `@promptbook/core`
  */
 var DEFAULT_REMOTE_URL_PATH = '/promptbook/socket.io';
+// <- TODO: [🧜‍♂️]
+/**
+ * @@@
+ *
+ * @public exported from `@promptbook/core`
+ */
+var IS_VERBOSE = false;
 /**
  * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
  */
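The new `IS_VERBOSE` constant becomes the shared default for every `isVerbose` option in the bundle; the hunks below simply swap a literal `false` for it in the compiled output. In TypeScript source terms the pattern presumably looks like this sketch (the option type name is hypothetical):

```ts
// Minimal sketch of the pattern, assuming a config module that exports IS_VERBOSE.
// `CreateCollectionFromUrlOptions` is a hypothetical name used only for illustration.
export const IS_VERBOSE = false;

type CreateCollectionFromUrlOptions = {
    isVerbose?: boolean;
    isLazyLoaded?: boolean;
};

function resolveOptions(options: CreateCollectionFromUrlOptions = {}) {
    // The default now comes from the shared IS_VERBOSE constant instead of a
    // literal `false`, so verbosity can be flipped in one place for the library.
    const { isVerbose = IS_VERBOSE, isLazyLoaded = false } = options;
    return { isVerbose, isLazyLoaded };
}
```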
@@ -1328,7 +1330,7 @@ function createCollectionFromUrl(url, options) {
     return __generator(this, function (_d) {
         switch (_d.label) {
             case 0:
-                _a = options || {}, _b = _a.isVerbose, isVerbose = _b === void 0 ? false : _b, _c = _a.isLazyLoaded, isLazyLoaded = _c === void 0 ? false : _c;
+                _a = options || {}, _b = _a.isVerbose, isVerbose = _b === void 0 ? IS_VERBOSE : _b, _c = _a.isLazyLoaded, isLazyLoaded = _c === void 0 ? false : _c;
                 collection = createCollectionFromPromise(function () { return __awaiter(_this, void 0, void 0, function () {
                     return __generator(this, function (_a) {
                         if (isVerbose) {
@@ -1645,7 +1647,7 @@ function forEachAsync(array, options, callbackfunction) {
     });
 }
 
-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
 var defaultDiacriticsRemovalMap = [
     {
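For orientation, each entry in the inlined `PipelineCollection` above follows roughly this shape (field names are taken from the JSON itself; the type name `PipelineJson` and the exact optionality are assumptions):

```ts
// Rough shape of one PipelineCollection entry, derived from the inlined JSON above.
// The type name `PipelineJson` is an assumption for illustration.
type PipelineJson = {
    title: string;
    pipelineUrl: string;
    promptbookVersion: string;
    parameters: Array<{ name: string; description: string; isInput: boolean; isOutput: boolean }>;
    promptTemplates: Array<{
        blockType: 'PROMPT_TEMPLATE';
        name: string;
        title: string;
        modelRequirements: { modelVariant: 'CHAT' | 'COMPLETION'; modelName: string };
        content: string; // <- prompt text with {parameter} placeholders
        expectations?: { words?: { min?: number; max?: number } };
        expectFormat?: 'JSON';
        dependentParameterNames: Array<string>;
        resultingParameterName: string;
    }>;
    knowledgeSources: Array<unknown>;
    knowledgePieces: Array<unknown>;
    personas: Array<unknown>;
    preparations: Array<unknown>;
    sourceFile: string;
};
```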
@@ -2243,8 +2245,37 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
      */
     MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
         return __awaiter(this, void 0, void 0, function () {
-
-
+            var _a, _b, llmExecutionTools, e_1_1;
+            var e_1, _c;
+            return __generator(this, function (_d) {
+                switch (_d.label) {
+                    case 0:
+                        _d.trys.push([0, 5, 6, 7]);
+                        _a = __values(this.llmExecutionTools), _b = _a.next();
+                        _d.label = 1;
+                    case 1:
+                        if (!!_b.done) return [3 /*break*/, 4];
+                        llmExecutionTools = _b.value;
+                        return [4 /*yield*/, llmExecutionTools.checkConfiguration()];
+                    case 2:
+                        _d.sent();
+                        _d.label = 3;
+                    case 3:
+                        _b = _a.next();
+                        return [3 /*break*/, 1];
+                    case 4: return [3 /*break*/, 7];
+                    case 5:
+                        e_1_1 = _d.sent();
+                        e_1 = { error: e_1_1 };
+                        return [3 /*break*/, 7];
+                    case 6:
+                        try {
+                            if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+                        }
+                        finally { if (e_1) throw e_1.error; }
+                        return [7 /*endfinally*/];
+                    case 7: return [2 /*return*/];
+                }
             });
         });
     };
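The added state machine is TypeScript's ES5 downlevel emit of a plain async `for...of` loop; nothing more is going on than sequentially delegating to each wrapped tool. The source presumably reads close to this sketch:

```ts
// A minimal sketch, assuming an LlmExecutionTools interface with checkConfiguration().
interface LlmExecutionTools {
    checkConfiguration(): Promise<void>;
}

class MultipleLlmExecutionTools {
    public constructor(private readonly llmExecutionTools: ReadonlyArray<LlmExecutionTools>) {}

    // Presumed source of the emitted state machine above: check each wrapped
    // tool's configuration in order, letting the first failure propagate.
    public async checkConfiguration(): Promise<void> {
        for (const tools of this.llmExecutionTools) {
            await tools.checkConfiguration();
        }
    }
}
```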
@@ -2254,8 +2285,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
      */
     MultipleLlmExecutionTools.prototype.listModels = function () {
         return __awaiter(this, void 0, void 0, function () {
-            var availableModels, _a, _b, llmExecutionTools, models,
-            var
+            var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
+            var e_2, _c;
             return __generator(this, function (_d) {
                 switch (_d.label) {
                     case 0:
@@ -2278,14 +2309,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
                         return [3 /*break*/, 2];
                     case 5: return [3 /*break*/, 8];
                     case 6:
-
-
+                        e_2_1 = _d.sent();
+                        e_2 = { error: e_2_1 };
                         return [3 /*break*/, 8];
                     case 7:
                         try {
                             if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
                         }
-                        finally { if (
+                        finally { if (e_2) throw e_2.error; }
                         return [7 /*endfinally*/];
                     case 8: return [2 /*return*/, availableModels];
                 }
@@ -2318,8 +2349,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
      */
     MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var errors, _a, _b, llmExecutionTools, _c, error_1,
-            var
+            var errors, _a, _b, llmExecutionTools, _c, error_1, e_3_1;
+            var e_3, _d;
             var _this = this;
             return __generator(this, function (_e) {
                 switch (_e.label) {
@@ -2375,14 +2406,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
                         return [3 /*break*/, 2];
                     case 14: return [3 /*break*/, 17];
                     case 15:
-
-
+                        e_3_1 = _e.sent();
+                        e_3 = { error: e_3_1 };
                         return [3 /*break*/, 17];
                     case 16:
                         try {
                             if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
                         }
-                        finally { if (
+                        finally { if (e_3) throw e_3.error; }
                         return [7 /*endfinally*/];
                     case 17:
                         if (errors.length === 1) {
@@ -2865,7 +2896,7 @@ function isPassingExpectations(expectations, value) {
 function createPipelineExecutor(options) {
     var _this = this;
     var pipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
-    var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? false : _d;
+    var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? IS_VERBOSE : _d, _e = settings.isNotPreparedWarningSupressed, isNotPreparedWarningSupressed = _e === void 0 ? false : _e;
     validatePipeline(pipeline);
     var llmTools = joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(arrayableToArray(tools.llm)), false));
     var preparedPipeline;
@@ -3660,7 +3691,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
     return __generator(this, function (_j) {
         switch (_j.label) {
             case 0:
-                llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
+                llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? IS_VERBOSE : _b;
                 TODO_USE(maxParallelCount); // <- [🪂]
                 collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
                 _c = createPipelineExecutor;
@@ -3947,7 +3978,7 @@ function preparePersona(personaDescription, options) {
     return __generator(this, function (_d) {
         switch (_d.label) {
             case 0:
-                llmTools = options.llmTools, _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+                llmTools = options.llmTools, _a = options.isVerbose, isVerbose = _a === void 0 ? IS_VERBOSE : _a;
                 collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
                 _b = createPipelineExecutor;
                 _c = {};
@@ -4104,7 +4135,7 @@ function preparePipeline(pipeline, options) {
     if (isPipelinePrepared(pipeline)) {
         return [2 /*return*/, pipeline];
     }
-    llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? false : _b;
+    llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? IS_VERBOSE : _b;
     parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
     llmToolsWithUsage = countTotalUsage(llmTools);
     currentPreparation = {
@@ -6517,7 +6548,7 @@ var CallbackInterfaceTools = /** @class */ (function () {
 /**
  * @@@
  *
- * Note: `$` is used to indicate that this function is not a pure function - it access global
+ * Note: `$` is used to indicate that this function is not a pure function - it access global scope
  *
  * @public exported from `@promptbook/utils`
  */
@@ -6531,42 +6562,42 @@ function $getGlobalScope() {
 /**
  * Register is @@@
  *
+ * Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
+ *
  * @private internal utility, exported are only signleton instances of this class
  */
-var Register = /** @class */ (function () {
-    function Register(
-        this.
+var $Register = /** @class */ (function () {
+    function $Register(storageName) {
+        this.storageName = storageName;
+        storageName = "_promptbook_".concat(storageName);
+        var globalScope = $getGlobalScope();
+        if (globalScope[storageName] === undefined) {
+            globalScope[storageName] = [];
+        }
+        else if (!Array.isArray(globalScope[storageName])) {
+            throw new UnexpectedError("Expected (global) ".concat(storageName, " to be an array, but got ").concat(typeof globalScope[storageName]));
+        }
+        this.storage = globalScope[storageName];
     }
-    Register.prototype.list = function () {
+    $Register.prototype.list = function () {
         // <- TODO: ReadonlyDeep<Array<TRegistered>>
         return this.storage;
     };
-    Register.prototype.register = function (registered) {
+    $Register.prototype.register = function (registered) {
         // <- TODO: What to return here
         var packageName = registered.packageName, className = registered.className;
         var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
         var existingRegistration = this.storage[existingRegistrationIndex];
         if (!existingRegistration) {
-            console.warn("[\uD83D\uDCE6] Registering ".concat(packageName, ".").concat(className, " again"));
             this.storage.push(registered);
         }
         else {
-            console.warn("[\uD83D\uDCE6] Re-registering ".concat(packageName, ".").concat(className, " again"));
             this.storage[existingRegistrationIndex] = registered;
         }
     };
-    return Register;
+    return $Register;
 }());
 
-// TODO: !!!!!! Move this logic to Register and rename to $Register
-var globalScope = $getGlobalScope();
-if (globalScope.$llmToolsMetadataRegister === undefined) {
-    globalScope.$llmToolsMetadataRegister = [];
-}
-else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
-    throw new UnexpectedError("Expected $llmToolsMetadataRegister to be an array, but got ".concat(typeof globalScope.$llmToolsMetadataRegister));
-}
-var _ = globalScope.$llmToolsMetadataRegister;
 /**
  * @@@
  *
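The rename from `Register` to `$Register` matches the library's stated convention that a `$` prefix marks impure, global-scope-touching code: the backing array now lives on the global object under a `_promptbook_`-prefixed key, so two independently bundled copies of the library share one registry instead of each holding a private one. A hedged usage sketch (the generic parameter follows the `TRegistered` TODO above; everything beyond `packageName`/`className` is assumed):

```ts
// Usage sketch of the $Register pattern shown in the hunk above.
type Registration = { packageName: string; className: string };

const register = new $Register<Registration>('llm_tools_metadata');
// ^ Backed by globalThis['_promptbook_llm_tools_metadata'], so a second copy of
//   the library in the same page or process sees the same array.

register.register({ packageName: '@promptbook/openai', className: 'OpenAiExecutionTools' });
console.info(register.list()); // -> [{ packageName: '@promptbook/openai', className: 'OpenAiExecutionTools' }]
```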
@@ -6574,8 +6605,7 @@ var _ = globalScope.$llmToolsMetadataRegister;
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
-var $llmToolsMetadataRegister = new Register(
-$getGlobalScope().$llmToolsMetadataRegister;
+var $llmToolsMetadataRegister = new $Register('llm_tools_metadata');
 
 /**
  * @@@
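With both singletons built on `$Register`, a provider package can announce itself twice: its metadata (always shipped with `@promptbook/core`) and its actual constructor (only present when the provider package is installed). Comparing the two registers is what lets the library tell "known but not installed" apart from "installed", as the `$registeredLlmToolsMessage` helper added in the next hunk does. Roughly (payload fields beyond `packageName`/`className` are assumptions):

```ts
// Hedged sketch of how the paired registers are meant to be populated.
$llmToolsMetadataRegister.register({
    packageName: '@promptbook/openai',
    className: 'OpenAiExecutionTools',
});
$llmToolsRegister.register({
    packageName: '@promptbook/openai',
    className: 'OpenAiExecutionTools',
    // ...plus the constructor itself in the real register (assumption)
});
```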
@@ -6584,1687 +6614,125 @@ $getGlobalScope().$llmToolsMetadataRegister;
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
-var $llmToolsRegister = new Register(
-// TODO: !!!!!! Take from global scope
-]);
+var $llmToolsRegister = new $Register('llm_execution_tools_constructors');
 
 /**
- *
+ * Creates a message with all registered LLM tools
  *
- *
- * This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
+ * Note: This function is used to create a (error) message when there is no constructor for some LLM provider
  *
- * @
- * @public exported from `@promptbook/remote-client`
+ * @private internal function of `createLlmToolsFromConfiguration` and `createLlmToolsFromEnv`
  */
-
-
-        this.options = options;
-    }
-    Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
-        get: function () {
-            // TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
-            return 'Remote server';
-        },
-        enumerable: false,
-        configurable: true
-    });
-    Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
-        get: function () {
-            return 'Use all models by your remote server';
-        },
-        enumerable: false,
-        configurable: true
-    });
-    /**
-     * Check the configuration of all execution tools
-     */
-    RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                return [2 /*return*/];
-            });
-        });
-    };
-    /**
-     * List all available models that can be used
-     */
-    RemoteLlmExecutionTools.prototype.listModels = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                return [2 /*return*/, (this.options.models ||
-                        [
-                        /* !!!!!! */
-                        ])];
-            });
-        });
-    };
-    /**
-     * Creates a connection to the remote proxy server.
-     */
-    RemoteLlmExecutionTools.prototype.makeConnection = function () {
-        var _this = this;
-        return new Promise(
-        // <- TODO: [🧱] Implement in a functional (not new Class) way
-        function (resolve, reject) {
-            var socket = io(_this.options.remoteUrl, {
-                path: _this.options.path,
-                // path: `${this.remoteUrl.pathname}/socket.io`,
-                transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
-            });
-            // console.log('Connecting to', this.options.remoteUrl.href, { socket });
-            socket.on('connect', function () {
-                resolve(socket);
-            });
-            // TODO: !!!! Better timeout handling
-            setTimeout(function () {
-                reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
-            }, 1000 /* <- TODO: Timeout to config */);
-        });
-    };
-    /**
-     * Calls remote proxy server to use a chat model
-     */
-    RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
-        if (this.options.isVerbose) {
-            console.info("\uD83D\uDD8B Remote callChatModel call");
-        }
-        return /* not await */ this.callCommonModel(prompt);
-    };
+function $registeredLlmToolsMessage() {
+    var e_1, _a, e_2, _b;
     /**
-     *
+     * Mixes registered LLM tools from $llmToolsMetadataRegister and $llmToolsRegister
      */
-
-
-
+    var all = [];
+    var _loop_1 = function (packageName, className) {
+        if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+            return "continue";
         }
-
+        all.push({ packageName: packageName, className: className });
     };
-
-
-
-
-    if (this.options.isVerbose) {
-        console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
+    try {
+        for (var _c = __values($llmToolsMetadataRegister.list()), _d = _c.next(); !_d.done; _d = _c.next()) {
+            var _e = _d.value, packageName = _e.packageName, className = _e.className;
+            _loop_1(packageName, className);
         }
-        return /* not await */ this.callCommonModel(prompt);
-    };
-    // <- Note: [🤖] callXxxModel
-    /**
-     * Calls remote proxy server to use both completion or chat model
-     */
-    RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
-        return __awaiter(this, void 0, void 0, function () {
-            var socket, promptResult;
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0: return [4 /*yield*/, this.makeConnection()];
-                    case 1:
-                        socket = _a.sent();
-                        if (this.options.isAnonymous) {
-                            socket.emit('request', {
-                                llmToolsConfiguration: this.options.llmToolsConfiguration,
-                                prompt: prompt,
-                                // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
-                            });
-                        }
-                        else {
-                            socket.emit('request', {
-                                clientId: this.options.clientId,
-                                prompt: prompt,
-                                // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
-                            });
-                        }
-                        return [4 /*yield*/, new Promise(function (resolve, reject) {
-                                socket.on('response', function (response) {
-                                    resolve(response.promptResult);
-                                    socket.disconnect();
-                                });
-                                socket.on('error', function (error) {
-                                    reject(new PipelineExecutionError(error.errorMessage));
-                                    socket.disconnect();
-                                });
-                            })];
-                    case 2:
-                        promptResult = _a.sent();
-                        socket.disconnect();
-                        return [2 /*return*/, promptResult];
-                }
-            });
-        });
-    };
-    return RemoteLlmExecutionTools;
-}());
-/**
- * TODO: [🍓] Allow to list compatible models with each variant
- * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- * TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
- */
-
-/**
- * Function computeUsage will create price per one token based on the string value found on openai page
- *
- * @private within the repository, used only as internal helper for `OPENAI_MODELS`
- */
-function computeUsage(value) {
-    var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
-    return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
-}
-
-/**
- * List of available Anthropic Claude models with pricing
- *
- * Note: Done at 2024-08-16
- *
- * @see https://docs.anthropic.com/en/docs/models-overview
- * @public exported from `@promptbook/anthropic-claude`
- */
-var ANTHROPIC_CLAUDE_MODELS = [
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'Claude 3.5 Sonnet',
-        modelName: 'claude-3-5-sonnet-20240620',
-        pricing: {
-            prompt: computeUsage("$3.00 / 1M tokens"),
-            output: computeUsage("$15.00 / 1M tokens"),
-        },
-    },
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'Claude 3 Opus',
-        modelName: 'claude-3-opus-20240229',
-        pricing: {
-            prompt: computeUsage("$15.00 / 1M tokens"),
-            output: computeUsage("$75.00 / 1M tokens"),
-        },
-    },
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'Claude 3 Sonnet',
-        modelName: 'claude-3-sonnet-20240229',
-        pricing: {
-            prompt: computeUsage("$3.00 / 1M tokens"),
-            output: computeUsage("$15.00 / 1M tokens"),
-        },
-    },
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'Claude 3 Haiku',
-        modelName: ' claude-3-haiku-20240307',
-        pricing: {
-            prompt: computeUsage("$0.25 / 1M tokens"),
-            output: computeUsage("$1.25 / 1M tokens"),
-        },
-    },
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'Claude 2.1',
-        modelName: 'claude-2.1',
-        pricing: {
-            prompt: computeUsage("$8.00 / 1M tokens"),
-            output: computeUsage("$24.00 / 1M tokens"),
-        },
-    },
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'Claude 2',
-        modelName: 'claude-2.0',
-        pricing: {
-            prompt: computeUsage("$8.00 / 1M tokens"),
-            output: computeUsage("$24.00 / 1M tokens"),
-        },
-    },
-    {
-        modelVariant: 'CHAT',
-        modelTitle: ' Claude Instant 1.2',
-        modelName: 'claude-instant-1.2',
-        pricing: {
-            prompt: computeUsage("$0.80 / 1M tokens"),
-            output: computeUsage("$2.40 / 1M tokens"),
-        },
-    },
-    // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
-];
-/**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
- * TODO: [🧠] Some mechanism to propagate unsureness
- * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
- * TODO: [🎰] Some mechanism to auto-update available models
- */
-
-/**
- * Get current date in ISO 8601 format
- *
- * @private internal utility
- */
-function getCurrentIsoDate() {
-    return new Date().toISOString();
-}
-
-/**
- * Helper of usage compute
- *
- * @param content the content of prompt or response
- * @returns part of PromptResultUsageCounts
- *
- * @private internal utility of LlmExecutionTools
- */
-function computeUsageCounts(content) {
-    return {
-        charactersCount: { value: countCharacters(content) },
-        wordsCount: { value: countWords(content) },
-        sentencesCount: { value: countSentences(content) },
-        linesCount: { value: countLines(content) },
-        paragraphsCount: { value: countParagraphs(content) },
-        pagesCount: { value: countPages(content) },
-    };
-}
-
-/**
- * Make UncertainNumber
- *
- * @param value
- *
- * @private utility for initializating UncertainNumber
- */
-function uncertainNumber(value) {
-    if (value === null || value === undefined || Number.isNaN(value)) {
-        return { value: 0, isUncertain: true };
-    }
-    return { value: value };
-}
-
-/**
- * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
- *
- * @param promptContent The content of the prompt
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
- * @param rawResponse The raw response from Anthropic Claude API
- * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
- * @private internal utility of `AnthropicClaudeExecutionTools`
- */
-function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
-resultContent, rawResponse) {
-    var _a, _b;
-    if (rawResponse.usage === undefined) {
-        throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
     }
-
-
+    catch (e_1_1) { e_1 = { error: e_1_1 }; }
+    finally {
+        try {
+            if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
+        }
+        finally { if (e_1) throw e_1.error; }
     }
-    var
-
-
-
-
+    var _loop_2 = function (packageName, className) {
+        if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
+            return "continue";
+        }
+        all.push({ packageName: packageName, className: className });
+    };
+    try {
+        for (var _f = __values($llmToolsRegister.list()), _g = _f.next(); !_g.done; _g = _f.next()) {
+            var _h = _g.value, packageName = _h.packageName, className = _h.className;
+            _loop_2(packageName, className);
+        }
     }
-
-
+    catch (e_2_1) { e_2 = { error: e_2_1 }; }
+    finally {
+        try {
+            if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
+        }
+        finally { if (e_2) throw e_2.error; }
     }
-
-
-
-
-
+    var metadata = all.map(function (metadata) {
+        var isMetadataAviailable = $llmToolsMetadataRegister
+            .list()
+            .find(function (_a) {
+            var packageName = _a.packageName, className = _a.className;
+            return metadata.packageName === packageName && metadata.className === className;
+        });
+        var isInstalled = $llmToolsRegister
+            .list()
+            .find(function (_a) {
+            var packageName = _a.packageName, className = _a.className;
+            return metadata.packageName === packageName && metadata.className === className;
+        });
+        return __assign(__assign({}, metadata), { isMetadataAviailable: isMetadataAviailable, isInstalled: isInstalled });
+    });
+    return spaceTrim(function (block) { return "\n Available LLM providers are:\n ".concat(block(metadata
+        .map(function (_a, i) {
+        var packageName = _a.packageName, className = _a.className, isMetadataAviailable = _a.isMetadataAviailable, isInstalled = _a.isInstalled;
+        var more;
+        if (just(false)) {
+            more = '';
+        }
+        else if (!isMetadataAviailable && !isInstalled) {
+            // TODO: [�][�] Maybe do allow to do auto-install if package not registered and not found
+            more = "(not installed and no metadata, looks like a unexpected behavior)";
+        }
+        else if (isMetadataAviailable && !isInstalled) {
+            // TODO: [�][�]
+            more = "(not installed)";
+        }
+        else if (!isMetadataAviailable && isInstalled) {
+            more = "(no metadata, looks like a unexpected behavior)";
+        }
+        else if (isMetadataAviailable && isInstalled) {
+            more = "(installed)";
+        }
+        else {
+            more = "(unknown state, looks like a unexpected behavior)";
+        }
+        return "".concat(i + 1, ") `").concat(className, "` from `").concat(packageName, "` ").concat(more);
+    })
+        .join('\n')), "\n "); });
 }
6913
|
-
/**
|
|
6914
|
-
* TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
|
|
6915
|
-
*/
|
|
6916
6713
|
|
|
6917
6714
|
/**
|
|
6918
|
-
*
|
|
6715
|
+
* @@@
|
|
6716
|
+
*
|
|
6717
|
+
* Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
|
|
6919
6718
|
*
|
|
6920
|
-
* @
|
|
6921
|
-
* @
|
|
6719
|
+
* @returns @@@
|
|
6720
|
+
* @public exported from `@promptbook/core`
|
|
6922
6721
|
*/
|
|
6923
|
-
|
|
6924
|
-
|
|
6925
|
-
|
|
6926
|
-
|
|
6927
|
-
|
|
6928
|
-
|
|
6929
|
-
|
|
6930
|
-
|
|
6931
|
-
|
|
6932
|
-
/**
|
|
6933
|
-
* Anthropic Claude API client.
|
|
6934
|
-
*/
|
|
6935
|
-
this.client = null;
|
|
6936
|
-
}
|
|
6937
|
-
Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
|
|
6938
|
-
get: function () {
|
|
6939
|
-
return 'Anthropic Claude';
|
|
6940
|
-
},
|
|
6941
|
-
enumerable: false,
|
|
6942
|
-
configurable: true
|
|
6943
|
-
});
|
|
6944
|
-
Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
|
|
6945
|
-
get: function () {
|
|
6946
|
-
return 'Use all models provided by Anthropic Claude';
|
|
6947
|
-
},
|
|
6948
|
-
enumerable: false,
|
|
6949
|
-
configurable: true
|
|
6950
|
-
});
|
|
6951
|
-
AnthropicClaudeExecutionTools.prototype.getClient = function () {
|
|
6952
|
-
return __awaiter(this, void 0, void 0, function () {
|
|
6953
|
-
var anthropicOptions;
|
|
6954
|
-
return __generator(this, function (_a) {
|
|
6955
|
-
if (this.client === null) {
|
|
6956
|
-
anthropicOptions = __assign({}, this.options);
|
|
6957
|
-
delete anthropicOptions.isVerbose;
|
|
6958
|
-
delete anthropicOptions.isProxied;
|
|
6959
|
-
this.client = new Anthropic(anthropicOptions);
|
|
6960
|
-
}
|
|
6961
|
-
return [2 /*return*/, this.client];
|
|
6962
|
-
});
|
|
6963
|
-
});
|
|
6964
|
-
};
|
|
6965
|
-
/**
|
|
6966
|
-
* Check the `options` passed to `constructor`
|
|
6967
|
-
*/
|
|
6968
|
-
AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
|
|
6969
|
-
return __awaiter(this, void 0, void 0, function () {
|
|
6970
|
-
return __generator(this, function (_a) {
|
|
6971
|
-
switch (_a.label) {
|
|
6972
|
-
case 0: return [4 /*yield*/, this.getClient()];
|
|
6973
|
-
case 1:
|
|
6974
|
-
_a.sent();
|
|
6975
|
-
return [2 /*return*/];
|
|
6976
|
-
}
|
|
6977
|
-
});
|
|
6722
|
+
function createLlmToolsFromConfiguration(configuration, options) {
|
|
6723
|
+
if (options === void 0) { options = {}; }
|
|
6724
|
+
var _a = options.isVerbose, isVerbose = _a === void 0 ? IS_VERBOSE : _a;
|
|
6725
|
+
var llmTools = configuration.map(function (llmConfiguration) {
|
|
6726
|
+
var registeredItem = $llmToolsRegister
|
|
6727
|
+
.list()
|
|
6728
|
+
.find(function (_a) {
|
|
6729
|
+
var packageName = _a.packageName, className = _a.className;
|
|
6730
|
+
return llmConfiguration.packageName === packageName && llmConfiguration.className === className;
|
|
6978
6731
|
});
|
|
6979
|
-
|
|
6980
|
-
|
|
6981
|
-
* List all available Anthropic Claude models that can be used
|
|
6982
|
-
*/
|
|
6983
|
-
AnthropicClaudeExecutionTools.prototype.listModels = function () {
|
|
6984
|
-
return ANTHROPIC_CLAUDE_MODELS;
|
|
6985
|
-
};
|
|
6986
|
-
/**
|
|
6987
|
-
* Calls Anthropic Claude API to use a chat model.
|
|
6988
|
-
*/
|
|
6989
|
-
AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
|
|
6990
|
-
return __awaiter(this, void 0, void 0, function () {
|
|
6991
|
-
var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
|
|
6992
|
-
return __generator(this, function (_a) {
|
|
6993
|
-
switch (_a.label) {
|
|
6994
|
-
case 0:
|
|
6995
|
-
if (this.options.isVerbose) {
|
|
6996
|
-
console.info('💬 Anthropic Claude callChatModel call');
|
|
6997
|
-
}
|
|
6998
|
-
content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
|
|
6999
|
-
return [4 /*yield*/, this.getClient()];
|
|
7000
|
-
case 1:
|
|
7001
|
-
client = _a.sent();
|
|
7002
|
-
// TODO: [☂] Use here more modelRequirements
|
|
7003
|
-
if (modelRequirements.modelVariant !== 'CHAT') {
|
|
7004
|
-
throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
|
|
7005
|
-
}
|
|
7006
|
-
modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
|
|
7007
|
-
rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
|
|
7008
|
-
rawRequest = {
|
|
7009
|
-
model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
|
|
7010
|
-
max_tokens: modelRequirements.maxTokens || 4096,
|
|
7011
|
-
// <- TODO: [🌾] Make some global max cap for maxTokens
|
|
7012
|
-
temperature: modelRequirements.temperature,
|
|
7013
|
-
system: modelRequirements.systemMessage,
|
|
7014
|
-
// <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
|
|
7015
|
-
// <- Note: [🧆]
|
|
7016
|
-
messages: [
|
|
7017
|
-
{
|
|
7018
|
-
role: 'user',
|
|
7019
|
-
content: rawPromptContent,
|
|
7020
|
-
},
|
|
7021
|
-
],
|
|
7022
|
-
// TODO: Is here some equivalent of user identification?> user: this.options.user,
|
|
7023
|
-
};
|
|
7024
|
-
start = getCurrentIsoDate();
|
|
7025
|
-
if (this.options.isVerbose) {
|
|
7026
|
-
console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
|
|
7027
|
-
}
|
|
7028
|
-
return [4 /*yield*/, client.messages.create(rawRequest)];
|
|
7029
|
-
case 2:
|
|
7030
|
-
rawResponse = _a.sent();
|
|
7031
|
-
if (this.options.isVerbose) {
|
|
7032
|
-
console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
|
|
7033
|
-
}
|
|
7034
|
-
if (!rawResponse.content[0]) {
|
|
7035
|
-
throw new PipelineExecutionError('No content from Anthropic Claude');
|
|
7036
|
-
}
|
|
7037
|
-
if (rawResponse.content.length > 1) {
|
|
7038
|
-
throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
|
|
7039
|
-
}
|
|
7040
|
-
contentBlock = rawResponse.content[0];
|
|
7041
|
-
if (contentBlock.type !== 'text') {
|
|
7042
|
-
throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
|
|
7043
|
-
}
|
|
7044
|
-
resultContent = contentBlock.text;
|
|
7045
|
-
// eslint-disable-next-line prefer-const
|
|
7046
|
-
complete = getCurrentIsoDate();
|
|
7047
|
-
usage = computeAnthropicClaudeUsage(content, '', rawResponse);
|
|
7048
|
-
return [2 /*return*/, {
|
|
7049
|
-
content: resultContent,
|
|
7050
|
-
modelName: rawResponse.model,
|
|
7051
|
-
timing: {
|
|
7052
|
-
start: start,
|
|
7053
|
-
complete: complete,
|
|
7054
|
-
},
|
|
7055
|
-
usage: usage,
|
|
7056
|
-
rawPromptContent: rawPromptContent,
|
|
7057
|
-
rawRequest: rawRequest,
|
|
7058
|
-
rawResponse: rawResponse,
|
|
7059
|
-
// <- [🗯]
|
|
7060
|
-
}];
|
|
7061
|
-
}
|
|
7062
|
-
});
|
|
7063
|
-
});
|
|
7064
|
-
};
|
|
7065
|
-
/*
|
|
7066
|
-
TODO: [👏]
|
|
7067
|
-
public async callCompletionModel(
|
|
7068
|
-
prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
|
|
7069
|
-
): Promise<PromptCompletionResult> {
|
|
7070
|
-
|
|
7071
|
-
if (this.options.isVerbose) {
|
|
7072
|
-
console.info('🖋 Anthropic Claude callCompletionModel call');
|
|
7073
|
-
}
|
|
7074
|
-
|
|
7075
|
-
const { content, parameters, modelRequirements } = prompt;
|
|
7076
|
-
|
|
7077
|
-
// TODO: [☂] Use here more modelRequirements
|
|
7078
|
-
if (modelRequirements.modelVariant !== 'COMPLETION') {
|
|
7079
|
-
throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
|
|
7080
|
-
}
|
|
7081
|
-
|
|
7082
|
-
const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
|
|
7083
|
-
const modelSettings = {
|
|
7084
|
-
model: modelName,
|
|
7085
|
-
max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
|
|
7086
|
-
// <- TODO: [🌾] Make some global max cap for maxTokens
|
|
7087
|
-
// <- TODO: Use here `systemMessage`, `temperature` and `seed`
|
|
7088
|
-
};
|
|
7089
|
-
|
|
7090
|
-
const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
|
|
7091
|
-
...modelSettings,
|
|
7092
|
-
prompt: rawPromptContent,
|
|
7093
|
-
user: this.options.user,
|
|
7094
|
-
};
|
|
7095
|
-
const start: string_date_iso8601 = getCurrentIsoDate();
|
|
7096
|
-
let complete: string_date_iso8601;
|
|
7097
|
-
|
|
7098
|
-
if (this.options.isVerbose) {
|
|
7099
|
-
console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
|
|
7100
|
-
}
|
|
7101
|
-
const rawResponse = await this.client.completions.create(rawRequest);
|
|
7102
|
-
if (this.options.isVerbose) {
|
|
7103
|
-
console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
|
|
7104
|
-
}
|
|
7105
|
-
|
|
7106
|
-
if (!rawResponse.choices[0]) {
|
|
7107
|
-
throw new PipelineExecutionError('No choises from Anthropic Claude');
|
|
7108
|
-
}
|
|
7109
|
-
|
|
7110
|
-
if (rawResponse.choices.length > 1) {
|
|
7111
|
-
// TODO: This should be maybe only warning
|
|
7112
|
-
throw new PipelineExecutionError('More than one choise from Anthropic Claude');
|
|
7113
|
-
}
|
|
7114
|
-
|
|
7115
|
-
const resultContent = rawResponse.choices[0].text;
|
|
7116
|
-
// eslint-disable-next-line prefer-const
|
|
7117
|
-
complete = getCurrentIsoDate();
|
|
7118
|
-
const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
|
|
7119
|
-
|
|
7120
|
-
|
|
7121
|
-
|
|
7122
|
-
return {
|
|
7123
|
-
content: resultContent,
|
|
7124
|
-
modelName: rawResponse.model || model,
|
|
7125
|
-
timing: {
|
|
7126
|
-
start,
|
|
7127
|
-
complete,
|
|
7128
|
-
},
|
|
7129
|
-
usage,
|
|
7130
|
-
rawResponse,
|
|
7131
|
-
// <- [🗯]
|
|
7132
|
-
};
|
|
7133
|
-
}
|
|
7134
|
-
*/
|
|
7135
|
-
// <- Note: [🤖] callXxxModel
|
|
7136
|
-
/**
|
|
7137
|
-
* Get the model that should be used as default
|
|
7138
|
-
*/
|
|
7139
|
-
AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
|
|
7140
|
-
var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
|
|
7141
|
-
var modelName = _a.modelName;
|
|
7142
|
-
return modelName.startsWith(defaultModelName);
|
|
7143
|
-
});
|
|
7144
|
-
if (model === undefined) {
|
|
7145
|
-
throw new UnexpectedError(spaceTrim(function (block) {
|
|
7146
|
-
return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
|
|
7147
|
-
var modelName = _a.modelName;
|
|
7148
|
-
return "- \"".concat(modelName, "\"");
|
|
7149
|
-
}).join('\n')), "\n\n ");
|
|
7150
|
-
}));
|
|
7151
|
-
}
|
|
7152
|
-
return model;
|
|
7153
|
-
};
|
|
7154
|
-
/**
|
|
7155
|
-
* Default model for chat variant.
|
|
7156
|
-
*/
|
|
7157
|
-
AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
|
|
7158
|
-
return this.getDefaultModel('claude-3-opus');
|
|
7159
|
-
};
|
|
7160
|
-
return AnthropicClaudeExecutionTools;
|
|
7161
|
-
}());
|
|
7162
|
-
/**
|
|
7163
|
-
* TODO: [🍆] JSON mode
|
|
7164
|
-
* TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
|
|
7165
|
-
* TODO: Maybe Create some common util for callChatModel and callCompletionModel
|
|
7166
|
-
* TODO: Maybe make custom OpenAiError
|
|
7167
|
-
* TODO: [🧠][🈁] Maybe use `isDeterministic` from options
|
|
7168
|
-
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
|
|
7169
|
-
* TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
|
|
7170
|
-
*/
|
|
7171
|
-
|
|
7172
|
-
/**
|
|
7173
|
-
* Execution Tools for calling Anthropic Claude API.
|
|
7174
|
-
*
|
|
7175
|
-
* @public exported from `@promptbook/anthropic-claude`
|
|
7176
|
-
*/
|
|
7177
|
-
var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
|
|
7178
|
-
if (options.isProxied) {
|
|
7179
|
-
return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
|
|
7180
|
-
{
|
|
7181
|
-
title: 'Anthropic Claude (proxied)',
|
|
7182
|
-
packageName: '@promptbook/anthropic-claude',
|
|
7183
|
-
className: 'AnthropicClaudeExecutionTools',
|
|
7184
|
-
options: __assign(__assign({}, options), { isProxied: false }),
|
|
7185
|
-
},
|
|
7186
|
-
], models: ANTHROPIC_CLAUDE_MODELS }));
|
|
7187
|
-
}
|
|
7188
|
-
return new AnthropicClaudeExecutionTools(options);
|
|
7189
|
-
}, {
|
|
7190
|
-
packageName: '@promptbook/anthropic-claude',
|
|
7191
|
-
className: 'AnthropicClaudeExecutionTools',
|
|
7192
|
-
});
|
|
7193
|
-
/**
|
|
7194
|
-
* TODO: [🧠] !!!! Make anonymous this with all LLM providers
|
|
7195
|
-
* TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
|
|
7196
|
-
* TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
|
|
7197
|
-
* TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
|
|
7198
|
-
* TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
|
|
7199
|
-
*/
|
|
7200
|
-
|
|
7201
|
-
-/**
- * List of available OpenAI models with pricing
- *
- * Note: Done at 2024-05-20
- *
- * @see https://platform.openai.com/docs/models/
- * @see https://openai.com/api/pricing/
- * @public exported from `@promptbook/openai`
- */
-var OPENAI_MODELS = [
-    /*/
-    {
-        modelTitle: 'dall-e-3',
-        modelName: 'dall-e-3',
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'whisper-1',
-        modelName: 'whisper-1',
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'COMPLETION',
-        modelTitle: 'davinci-002',
-        modelName: 'davinci-002',
-        pricing: {
-            prompt: computeUsage("$2.00 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"), // <- not sure
-        },
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'dall-e-2',
-        modelName: 'dall-e-2',
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-16k',
-        modelName: 'gpt-3.5-turbo-16k',
-        pricing: {
-            prompt: computeUsage("$3.00 / 1M tokens"),
-            output: computeUsage("$4.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'tts-1-hd-1106',
-        modelName: 'tts-1-hd-1106',
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'tts-1-hd',
-        modelName: 'tts-1-hd',
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4',
-        modelName: 'gpt-4',
-        pricing: {
-            prompt: computeUsage("$30.00 / 1M tokens"),
-            output: computeUsage("$60.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-32k',
-        modelName: 'gpt-4-32k',
-        pricing: {
-            prompt: computeUsage("$60.00 / 1M tokens"),
-            output: computeUsage("$120.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /*/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-0613',
-        modelName: 'gpt-4-0613',
-        pricing: {
-            prompt: computeUsage(` / 1M tokens`),
-            output: computeUsage(` / 1M tokens`),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-turbo-2024-04-09',
-        modelName: 'gpt-4-turbo-2024-04-09',
-        pricing: {
-            prompt: computeUsage("$10.00 / 1M tokens"),
-            output: computeUsage("$30.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-1106',
-        modelName: 'gpt-3.5-turbo-1106',
-        pricing: {
-            prompt: computeUsage("$1.00 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-turbo',
-        modelName: 'gpt-4-turbo',
-        pricing: {
-            prompt: computeUsage("$10.00 / 1M tokens"),
-            output: computeUsage("$30.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'COMPLETION',
-        modelTitle: 'gpt-3.5-turbo-instruct-0914',
-        modelName: 'gpt-3.5-turbo-instruct-0914',
-        pricing: {
-            prompt: computeUsage("$1.50 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'COMPLETION',
-        modelTitle: 'gpt-3.5-turbo-instruct',
-        modelName: 'gpt-3.5-turbo-instruct',
-        pricing: {
-            prompt: computeUsage("$1.50 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'tts-1',
-        modelName: 'tts-1',
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo',
-        modelName: 'gpt-3.5-turbo',
-        pricing: {
-            prompt: computeUsage("$3.00 / 1M tokens"),
-            output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-0301',
-        modelName: 'gpt-3.5-turbo-0301',
-        pricing: {
-            prompt: computeUsage("$1.50 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'COMPLETION',
-        modelTitle: 'babbage-002',
-        modelName: 'babbage-002',
-        pricing: {
-            prompt: computeUsage("$0.40 / 1M tokens"),
-            output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-1106-preview',
-        modelName: 'gpt-4-1106-preview',
-        pricing: {
-            prompt: computeUsage("$10.00 / 1M tokens"),
-            output: computeUsage("$30.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-0125-preview',
-        modelName: 'gpt-4-0125-preview',
-        pricing: {
-            prompt: computeUsage("$10.00 / 1M tokens"),
-            output: computeUsage("$30.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'tts-1-1106',
-        modelName: 'tts-1-1106',
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-0125',
-        modelName: 'gpt-3.5-turbo-0125',
-        pricing: {
-            prompt: computeUsage("$0.50 / 1M tokens"),
-            output: computeUsage("$1.50 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-turbo-preview',
-        modelName: 'gpt-4-turbo-preview',
-        pricing: {
-            prompt: computeUsage("$10.00 / 1M tokens"),
-            output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'EMBEDDING',
-        modelTitle: 'text-embedding-3-large',
-        modelName: 'text-embedding-3-large',
-        pricing: {
-            prompt: computeUsage("$0.13 / 1M tokens"),
-            // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
-            output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'EMBEDDING',
-        modelTitle: 'text-embedding-3-small',
-        modelName: 'text-embedding-3-small',
-        pricing: {
-            prompt: computeUsage("$0.02 / 1M tokens"),
-            // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
-            output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-0613',
-        modelName: 'gpt-3.5-turbo-0613',
-        pricing: {
-            prompt: computeUsage("$1.50 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'EMBEDDING',
-        modelTitle: 'text-embedding-ada-002',
-        modelName: 'text-embedding-ada-002',
-        pricing: {
-            prompt: computeUsage("$0.1 / 1M tokens"),
-            // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
-            output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
-        },
-    },
-    /**/
-    /*/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-1106-vision-preview',
-        modelName: 'gpt-4-1106-vision-preview',
-    },
-    /**/
-    /*/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-vision-preview',
-        modelName: 'gpt-4-vision-preview',
-        pricing: {
-            prompt: computeUsage(`$10.00 / 1M tokens`),
-            output: computeUsage(`$30.00 / 1M tokens`),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4o-2024-05-13',
-        modelName: 'gpt-4o-2024-05-13',
-        pricing: {
-            prompt: computeUsage("$5.00 / 1M tokens"),
-            output: computeUsage("$15.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4o',
-        modelName: 'gpt-4o',
-        pricing: {
-            prompt: computeUsage("$5.00 / 1M tokens"),
-            output: computeUsage("$15.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-16k-0613',
-        modelName: 'gpt-3.5-turbo-16k-0613',
-        pricing: {
-            prompt: computeUsage("$3.00 / 1M tokens"),
-            output: computeUsage("$4.00 / 1M tokens"),
-        },
-    },
-    /**/
-];
-/**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] Some mechanism to propagate unsureness
- * TODO: [🎰] Some mechanism to auto-update available models
- * TODO: [🎰][👮♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
- * TODO: [🧠][👮♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
- * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
- * @see https://openai.com/api/pricing/
- * @see /other/playground/playground.ts
- * TODO: [🍓] Make better
- * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
- * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
- */
-
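Two reading aids for the list above: entries opened with `/*/` are commented out while entries between plain `/**/` markers are live (flipping a single character toggles an entry), and each pricing string is normalized by `computeUsage` into a price per single token. A sketch of the resulting arithmetic, assuming that per-token normalization (which matches how `computeOpenAiUsage` later multiplies token counts by these prices):

    // "$30.00 / 1M tokens" -> 30.00 / 1_000_000 USD per token, so a gpt-4 call with
    // 1000 prompt tokens and 500 completion tokens would be priced as:
    const promptPrice = 1000 * (30.0 / 1_000_000); // 0.03 USD
    const outputPrice = 500 * (60.0 / 1_000_000); // 0.03 USD
    const totalPrice = promptPrice + outputPrice; // 0.06 USD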
-/**
- * Execution Tools for calling Azure OpenAI API.
- *
- * @public exported from `@promptbook/azure-openai`
- */
-var AzureOpenAiExecutionTools = /** @class */ (function () {
-    /**
-     * Creates OpenAI Execution Tools.
-     *
-     * @param options which are relevant are directly passed to the OpenAI client
-     */
-    function AzureOpenAiExecutionTools(options) {
-        this.options = options;
-        /**
-         * OpenAI Azure API client.
-         */
-        this.client = null;
-    }
-    Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
-        get: function () {
-            return 'Azure OpenAI';
-        },
-        enumerable: false,
-        configurable: true
-    });
-    Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
-        get: function () {
-            return 'Use all models trained by OpenAI provided by Azure';
-        },
-        enumerable: false,
-        configurable: true
-    });
-    AzureOpenAiExecutionTools.prototype.getClient = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                if (this.client === null) {
-                    this.client = new OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(this.options.apiKey));
-                }
-                return [2 /*return*/, this.client];
-            });
-        });
-    };
-    /**
-     * Check the `options` passed to `constructor`
-     */
-    AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0: return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        _a.sent();
-                        return [2 /*return*/];
-                }
-            });
-        });
-    };
-    /**
-     * List all available Azure OpenAI models that can be used
-     */
-    AzureOpenAiExecutionTools.prototype.listModels = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                // TODO: !!! Do here some filtering which models are really available as deployment
-                // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
-                return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
-                        var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
-                        return ({
-                            modelTitle: "Azure ".concat(modelTitle),
-                            modelName: modelName,
-                            modelVariant: modelVariant,
-                        });
-                    })];
-            });
-        });
-    };
-    /**
-     * Calls OpenAI API to use a chat model.
-     */
-    AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
-        var _a, _b;
-        return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
-            return __generator(this, function (_c) {
-                switch (_c.label) {
-                    case 0:
-                        if (this.options.isVerbose) {
-                            console.info('💬 OpenAI callChatModel call');
-                        }
-                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
-                        return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        client = _c.sent();
-                        // TODO: [☂] Use here more modelRequirements
-                        if (modelRequirements.modelVariant !== 'CHAT') {
-                            throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
-                        }
-                        _c.label = 2;
-                    case 2:
-                        _c.trys.push([2, 4, , 5]);
-                        modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
-                        modelSettings = {
-                            maxTokens: modelRequirements.maxTokens,
-                            // <- TODO: [🌾] Make some global max cap for maxTokens
-                            temperature: modelRequirements.temperature,
-                            user: this.options.user,
-                            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
-                            // <- Note: [🧆]
-                        };
-                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
-                        messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
-                            ? []
-                            : [
-                                {
-                                    role: 'system',
-                                    content: modelRequirements.systemMessage,
-                                },
-                            ])), false), [
-                            {
-                                role: 'user',
-                                content: rawPromptContent,
-                            },
-                        ], false);
-                        start = getCurrentIsoDate();
-                        complete = void 0;
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
-                        }
-                        rawRequest = [modelName, messages, modelSettings];
-                        return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
-                    case 3:
-                        rawResponse = _c.sent();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-                        }
-                        if (!rawResponse.choices[0]) {
-                            throw new PipelineExecutionError('No choises from Azure OpenAI');
-                        }
-                        if (rawResponse.choices.length > 1) {
-                            // TODO: This should be maybe only warning
-                            throw new PipelineExecutionError('More than one choise from Azure OpenAI');
-                        }
-                        if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
-                            throw new PipelineExecutionError('Empty response from Azure OpenAI');
-                        }
-                        resultContent = rawResponse.choices[0].message.content;
-                        // eslint-disable-next-line prefer-const
-                        complete = getCurrentIsoDate();
-                        usage = {
-                            price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
-                            input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
-                            output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
-                        };
-                        return [2 /*return*/, {
-                                content: resultContent,
-                                modelName: modelName,
-                                timing: {
-                                    start: start,
-                                    complete: complete,
-                                },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
-                            }];
-                    case 4:
-                        error_1 = _c.sent();
-                        throw this.transformAzureError(error_1);
-                    case 5: return [2 /*return*/];
-                }
-            });
-        });
-    };
-    /**
-     * Calls Azure OpenAI API to use a complete model.
-     */
-    AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
-        var _a, _b;
-        return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
-            return __generator(this, function (_c) {
-                switch (_c.label) {
-                    case 0:
-                        if (this.options.isVerbose) {
-                            console.info('🖋 OpenAI callCompletionModel call');
-                        }
-                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
-                        return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        client = _c.sent();
-                        // TODO: [☂] Use here more modelRequirements
-                        if (modelRequirements.modelVariant !== 'COMPLETION') {
-                            throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
-                        }
-                        _c.label = 2;
-                    case 2:
-                        _c.trys.push([2, 4, , 5]);
-                        modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
-                        modelSettings = {
-                            maxTokens: modelRequirements.maxTokens || 2000,
-                            // <- TODO: [🌾] Make some global max cap for maxTokens
-                            temperature: modelRequirements.temperature,
-                            user: this.options.user,
-                            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
-                            // <- Note: [🧆]
-                        };
-                        start = getCurrentIsoDate();
-                        complete = void 0;
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('content'), JSON.stringify(content, null, 4));
-                            console.info(colors.bgWhite('parameters'), JSON.stringify(parameters, null, 4));
-                        }
-                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
-                        rawRequest = [
-                            modelName,
-                            [rawPromptContent],
-                            modelSettings,
-                        ];
-                        return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
-                    case 3:
-                        rawResponse = _c.sent();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-                        }
-                        if (!rawResponse.choices[0]) {
-                            throw new PipelineExecutionError('No choises from OpenAI');
-                        }
-                        if (rawResponse.choices.length > 1) {
-                            // TODO: This should be maybe only warning
-                            throw new PipelineExecutionError('More than one choise from OpenAI');
-                        }
-                        resultContent = rawResponse.choices[0].text;
-                        // eslint-disable-next-line prefer-const
-                        complete = getCurrentIsoDate();
-                        usage = {
-                            price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
-                            input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
-                            output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
-                        };
-                        return [2 /*return*/, {
-                                content: resultContent,
-                                modelName: modelName,
-                                timing: {
-                                    start: start,
-                                    complete: complete,
-                                },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
-                            }];
-                    case 4:
-                        error_2 = _c.sent();
-                        throw this.transformAzureError(error_2);
-                    case 5: return [2 /*return*/];
-                }
-            });
-        });
-    };
-    // <- Note: [🤖] callXxxModel
-    /**
-     * Changes Azure error (which is not propper Error but object) to propper Error
-     */
-    AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
-        if (typeof azureError !== 'object' || azureError === null) {
-            return new PipelineExecutionError("Unknown Azure OpenAI error");
-        }
-        var code = azureError.code, message = azureError.message;
-        return new PipelineExecutionError("".concat(code, ": ").concat(message));
-    };
-    return AzureOpenAiExecutionTools;
-}());
-/**
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom AzureOpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- */
-
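A minimal construction sketch for the class above (not part of the diff; the resource and deployment names are placeholders):

    import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

    // `getClient` builds the endpoint as https://<resourceName>.openai.azure.com/
    // and `deploymentName` is the fallback used when a prompt carries no `modelName`.
    const azureTools = new AzureOpenAiExecutionTools({
        resourceName: 'my-resource', // <- placeholder
        deploymentName: 'my-gpt-4', // <- placeholder
        apiKey: process.env.AZURE_OPENAI_API_KEY,
    });
    await azureTools.checkConfiguration(); // currently just instantiates the client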
-/**
- * Computes the usage of the OpenAI API based on the response from OpenAI
- *
- * @param promptContent The content of the prompt
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
- * @param rawResponse The raw response from OpenAI API
- * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
- * @private internal utility of `OpenAiExecutionTools`
- */
-function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
-resultContent, rawResponse) {
-    var _a, _b;
-    if (rawResponse.usage === undefined) {
-        throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
-    }
-    if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
-        throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
-    }
-    var inputTokens = rawResponse.usage.prompt_tokens;
-    var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
-    var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
-    var price;
-    if (modelInfo === undefined || modelInfo.pricing === undefined) {
-        price = uncertainNumber();
-    }
-    else {
-        price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
-    }
-    return {
-        price: price,
-        input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
-        output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
-    };
-}
-/**
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
- */
-
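A worked example of the pricing branch above: for a hypothetical gpt-3.5-turbo-0125 response reporting `usage: { prompt_tokens: 200, completion_tokens: 100 }`, the matching `OPENAI_MODELS` entry prices prompt and output at $0.50 and $1.50 per 1M tokens, so:

    const price = 200 * (0.50 / 1_000_000) + 100 * (1.50 / 1_000_000);
    // = 0.0001 + 0.00015 = 0.00025 USD, returned wrapped as uncertainNumber(price)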
-/**
- * Execution Tools for calling OpenAI API
- *
- * @public exported from `@promptbook/openai`
- */
-var OpenAiExecutionTools = /** @class */ (function () {
-    /**
-     * Creates OpenAI Execution Tools.
-     *
-     * @param options which are relevant are directly passed to the OpenAI client
-     */
-    function OpenAiExecutionTools(options) {
-        if (options === void 0) { options = {}; }
-        this.options = options;
-        /**
-         * OpenAI API client.
-         */
-        this.client = null;
-    }
-    Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
-        get: function () {
-            return 'OpenAI';
-        },
-        enumerable: false,
-        configurable: true
-    });
-    Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
-        get: function () {
-            return 'Use all models provided by OpenAI';
-        },
-        enumerable: false,
-        configurable: true
-    });
-    OpenAiExecutionTools.prototype.getClient = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            var openAiOptions;
-            return __generator(this, function (_a) {
-                if (this.client === null) {
-                    openAiOptions = __assign({}, this.options);
-                    delete openAiOptions.isVerbose;
-                    delete openAiOptions.user;
-                    this.client = new OpenAI(__assign({}, openAiOptions));
-                }
-                return [2 /*return*/, this.client];
-            });
-        });
-    };
-    /**
-     * Check the `options` passed to `constructor`
-     */
-    OpenAiExecutionTools.prototype.checkConfiguration = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0: return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        _a.sent();
-                        return [2 /*return*/];
-                }
-            });
-        });
-    };
-    /**
-     * List all available OpenAI models that can be used
-     */
-    OpenAiExecutionTools.prototype.listModels = function () {
-        /*
-        Note: Dynamic lising of the models
-        const models = await this.openai.models.list({});
-
-        console.log({ models });
-        console.log(models.data);
-        */
-        return OPENAI_MODELS;
-    };
-    /**
-     * Calls OpenAI API to use a chat model.
-     */
-    OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
-        return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0:
-                        if (this.options.isVerbose) {
-                            console.info('💬 OpenAI callChatModel call', { prompt: prompt });
-                        }
-                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
-                        return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        client = _a.sent();
-                        // TODO: [☂] Use here more modelRequirements
-                        if (modelRequirements.modelVariant !== 'CHAT') {
-                            throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
-                        }
-                        modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
-                        modelSettings = {
-                            model: modelName,
-                            max_tokens: modelRequirements.maxTokens,
-                            // <- TODO: [🌾] Make some global max cap for maxTokens
-                            temperature: modelRequirements.temperature,
-                            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
-                            // <- Note: [🧆]
-                        };
-                        if (expectFormat === 'JSON') {
-                            modelSettings.response_format = {
-                                type: 'json_object',
-                            };
-                        }
-                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
-                        rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
-                                ? []
-                                : [
-                                    {
-                                        role: 'system',
-                                        content: modelRequirements.systemMessage,
-                                    },
-                                ])), false), [
-                                {
-                                    role: 'user',
-                                    content: rawPromptContent,
-                                },
-                            ], false), user: this.options.user });
-                        start = getCurrentIsoDate();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
-                        }
-                        return [4 /*yield*/, client.chat.completions.create(rawRequest)];
-                    case 2:
-                        rawResponse = _a.sent();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-                        }
-                        if (!rawResponse.choices[0]) {
-                            throw new PipelineExecutionError('No choises from OpenAI');
-                        }
-                        if (rawResponse.choices.length > 1) {
-                            // TODO: This should be maybe only warning
-                            throw new PipelineExecutionError('More than one choise from OpenAI');
-                        }
-                        resultContent = rawResponse.choices[0].message.content;
-                        // eslint-disable-next-line prefer-const
-                        complete = getCurrentIsoDate();
-                        usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
-                        if (resultContent === null) {
-                            throw new PipelineExecutionError('No response message from OpenAI');
-                        }
-                        return [2 /*return*/, {
-                                content: resultContent,
-                                modelName: rawResponse.model || modelName,
-                                timing: {
-                                    start: start,
-                                    complete: complete,
-                                },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
-                            }];
-                }
-            });
-        });
-    };
-    /**
-     * Calls OpenAI API to use a complete model.
-     */
-    OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
-        return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0:
-                        if (this.options.isVerbose) {
-                            console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
-                        }
-                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
-                        return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        client = _a.sent();
-                        // TODO: [☂] Use here more modelRequirements
-                        if (modelRequirements.modelVariant !== 'COMPLETION') {
-                            throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
-                        }
-                        modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
-                        modelSettings = {
-                            model: modelName,
-                            max_tokens: modelRequirements.maxTokens || 2000,
-                            // <- TODO: [🌾] Make some global max cap for maxTokens
-                            temperature: modelRequirements.temperature,
-                            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
-                            // <- Note: [🧆]
-                        };
-                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
-                        rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
-                        start = getCurrentIsoDate();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
-                        }
-                        return [4 /*yield*/, client.completions.create(rawRequest)];
-                    case 2:
-                        rawResponse = _a.sent();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-                        }
-                        if (!rawResponse.choices[0]) {
-                            throw new PipelineExecutionError('No choises from OpenAI');
-                        }
-                        if (rawResponse.choices.length > 1) {
-                            // TODO: This should be maybe only warning
-                            throw new PipelineExecutionError('More than one choise from OpenAI');
-                        }
-                        resultContent = rawResponse.choices[0].text;
-                        // eslint-disable-next-line prefer-const
-                        complete = getCurrentIsoDate();
-                        usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
-                        return [2 /*return*/, {
-                                content: resultContent,
-                                modelName: rawResponse.model || modelName,
-                                timing: {
-                                    start: start,
-                                    complete: complete,
-                                },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
-                            }];
-                }
-            });
-        });
-    };
-    /**
-     * Calls OpenAI API to use a embedding model
-     */
-    OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
-        return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0:
-                        if (this.options.isVerbose) {
-                            console.info('🖋 OpenAI embedding call', { prompt: prompt });
-                        }
-                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
-                        return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        client = _a.sent();
-                        // TODO: [☂] Use here more modelRequirements
-                        if (modelRequirements.modelVariant !== 'EMBEDDING') {
-                            throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
-                        }
-                        modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
-                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
-                        rawRequest = {
-                            input: rawPromptContent,
-                            model: modelName,
-                        };
-                        start = getCurrentIsoDate();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
-                        }
-                        return [4 /*yield*/, client.embeddings.create(rawRequest)];
-                    case 2:
-                        rawResponse = _a.sent();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-                        }
-                        if (rawResponse.data.length !== 1) {
-                            throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
-                        }
-                        resultContent = rawResponse.data[0].embedding;
-                        // eslint-disable-next-line prefer-const
-                        complete = getCurrentIsoDate();
-                        usage = computeOpenAiUsage(content, '', rawResponse);
-                        return [2 /*return*/, {
-                                content: resultContent,
-                                modelName: rawResponse.model || modelName,
-                                timing: {
-                                    start: start,
-                                    complete: complete,
-                                },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
-                            }];
-                }
-            });
-        });
-    };
-    // <- Note: [🤖] callXxxModel
-    /**
-     * Get the model that should be used as default
-     */
-    OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
-        var model = OPENAI_MODELS.find(function (_a) {
-            var modelName = _a.modelName;
-            return modelName === defaultModelName;
-        });
-        if (model === undefined) {
-            throw new UnexpectedError(spaceTrim(function (block) {
-                return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
-                    var modelName = _a.modelName;
-                    return "- \"".concat(modelName, "\"");
-                }).join('\n')), "\n\n ");
-            }));
-        }
-        return model;
-    };
-    /**
-     * Default model for chat variant.
-     */
-    OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
-        return this.getDefaultModel('gpt-4o');
-    };
-    /**
-     * Default model for completion variant.
-     */
-    OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
-        return this.getDefaultModel('gpt-3.5-turbo-instruct');
-    };
-    /**
-     * Default model for completion variant.
-     */
-    OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
-        return this.getDefaultModel('text-embedding-3-large');
-    };
-    return OpenAiExecutionTools;
-}());
-/**
- * TODO: [🧠][🧙♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- */
-
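A minimal chat-call sketch against the class above (not part of the diff; the `{topic}` placeholder syntax accepted by `replaceParameters` and the minimal `Prompt` shape are assumptions):

    import { OpenAiExecutionTools } from '@promptbook/openai';

    const openAiTools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });
    const result = await openAiTools.callChatModel({
        content: 'Write one sentence about {topic}',
        parameters: { topic: 'package diffs' },
        modelRequirements: {
            modelVariant: 'CHAT',
            systemMessage: 'You are terse.',
            // no modelName here -> falls back to getDefaultChatModel(), i.e. 'gpt-4o'
        },
    });
    console.info(result.content, result.usage);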
-/**
- * Execution Tools for calling OpenAI API
- *
- * @public exported from `@promptbook/openai`
- */
-var createOpenAiExecutionTools = Object.assign(function (options) {
-    // TODO: !!!!!! If browser, auto add `dangerouslyAllowBrowser`
-    return new OpenAiExecutionTools(options);
-}, {
-    packageName: '@promptbook/openai',
-    className: 'OpenAiExecutionTools',
-});
-/**
- * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
- */
-
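The `Object.assign(fn, { packageName, className })` pattern above makes the factory carry its own registry metadata. A sketch of a lookup key built from it (the `keyOf` helper is illustrative, not part of the package):

    // The factory function doubles as its own metadata record:
    function keyOf(item: { packageName: string; className: string }): string {
        return `${item.packageName}/${item.className}`;
    }
    keyOf(createOpenAiExecutionTools); // -> '@promptbook/openai/OpenAiExecutionTools'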
-/**
- * @@@
- *
- * TODO: !!!!!! Remove EXECUTION_TOOLS_CLASSES and use $llmToolsRegister instead
- *
- * @private internal type for `createLlmToolsFromConfiguration`
- */
-var EXECUTION_TOOLS_CLASSES = {
-    createOpenAiExecutionTools: createOpenAiExecutionTools,
-    createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
-    createAzureOpenAiExecutionTools: function (options) {
-        return new AzureOpenAiExecutionTools(
-        // <- TODO: [🧱] Implement in a functional (not new Class) way
-        options);
-    },
-    // <- Note: [🦑] Add here new LLM provider
-};
-/**
- * TODO: !!!!!!! Make global register for this
- * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
- */
-
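Note how the map above is keyed: `createLlmToolsFromConfiguration` below resolves a configuration entry by prepending `create` to its `className`, for example:

    // className 'AzureOpenAiExecutionTools' resolves to the key 'createAzureOpenAiExecutionTools':
    const constructor = EXECUTION_TOOLS_CLASSES['create' + 'AzureOpenAiExecutionTools'];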
-/**
- * @@@
- *
- * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
- *
- * @returns @@@
- * @public exported from `@promptbook/core`
- */
-function createLlmToolsFromConfiguration(configuration, options) {
-    if (options === void 0) { options = {}; }
-    var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
-    var llmTools = configuration.map(function (llmConfiguration) {
-        var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
-        if (!constructor) {
-            throw new Error(spaceTrim(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
+        if (registeredItem === undefined) {
+            throw new Error(spaceTrim(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "` from `").concat(llmConfiguration.packageName, "`\n\n You have probably forgotten install and import the provider package.\n To fix this issue, you can:\n\n Install:\n\n > npm install ").concat(llmConfiguration.packageName, "\n\n And import:\n\n > import '").concat(llmConfiguration.packageName, "';\n\n\n ").concat(block($registeredLlmToolsMessage()), "\n "); }));
         }
-        return
+        return registeredItem(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
     });
     return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
 }
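A configuration-driven sketch matching the new `registeredItem` code path above (not part of the diff; the API key is a placeholder), including the provider import that the new error message asks for:

    import { createLlmToolsFromConfiguration } from '@promptbook/core';
    import '@promptbook/openai'; // <- registers the OpenAI constructor in `$llmToolsRegister`

    const llmTools = createLlmToolsFromConfiguration(
        [
            {
                title: 'OpenAI (from config)',
                packageName: '@promptbook/openai',
                className: 'OpenAiExecutionTools',
                options: { apiKey: 'sk-...' /* <- placeholder */ },
            },
        ],
        { isVerbose: true },
    );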
@@ -8535,7 +7003,7 @@ var _AnthropicClaudeMetadataRegistration = $llmToolsMetadataRegister.register({
     if (typeof env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
         return {
             title: 'Claude (from env)',
-            packageName: '@promptbook/
+            packageName: '@promptbook/anthropic-claude',
             className: 'AnthropicClaudeExecutionTools',
             options: {
                 apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
@@ -8979,5 +7447,5 @@ function executionReportJsonToString(executionReportJson, options) {
  * TODO: [🧠] Should be in generated file GENERATOR_WARNING
  */
 
-export { $llmToolsMetadataRegister, $llmToolsRegister, BlockTypes, CLAIM, CallbackInterfaceTools, CollectionError, DEFAULT_REMOTE_URL, DEFAULT_REMOTE_URL_PATH, EXECUTIONS_CACHE_DIRNAME, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, LimitReachedError, MAX_EXECUTION_ATTEMPTS, MAX_FILENAME_LENGTH, MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, MAX_PARALLEL_COUNT, MODEL_VARIANTS, MemoryStorage, NotFoundError, NotYetImplementedError, PIPELINE_COLLECTION_BASE_FILENAME, PROMPTBOOK_VERSION, ParsingError, PipelineExecutionError, PipelineLogicError, PrefixStorage, RESERVED_PARAMETER_NAMES, ReferenceError$1 as ReferenceError, UnexpectedError, VersionMismatchError, ZERO_USAGE, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _OpenAiMetadataRegistration, addUsage, assertsExecutionSuccessful, cacheLlmTools, collectionToJson, countTotalUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, isPassingExpectations, isPipelinePrepared, joinLlmExecutionTools, limitTotalUsage, pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, prepareKnowledgeFromMarkdown, prepareKnowledgePieces, preparePersona, preparePipeline, prepareTemplates, prettifyPipelineString, stringifyPipelineJson, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline };
+export { $llmToolsMetadataRegister, $llmToolsRegister, BlockTypes, CLAIM, CallbackInterfaceTools, CollectionError, DEFAULT_REMOTE_URL, DEFAULT_REMOTE_URL_PATH, EXECUTIONS_CACHE_DIRNAME, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, IS_VERBOSE, LimitReachedError, MAX_EXECUTION_ATTEMPTS, MAX_FILENAME_LENGTH, MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, MAX_PARALLEL_COUNT, MODEL_VARIANTS, MemoryStorage, NotFoundError, NotYetImplementedError, PIPELINE_COLLECTION_BASE_FILENAME, PROMPTBOOK_VERSION, ParsingError, PipelineExecutionError, PipelineLogicError, PrefixStorage, RESERVED_PARAMETER_NAMES, ReferenceError$1 as ReferenceError, UnexpectedError, VersionMismatchError, ZERO_USAGE, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _OpenAiMetadataRegistration, addUsage, assertsExecutionSuccessful, cacheLlmTools, collectionToJson, countTotalUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, isPassingExpectations, isPipelinePrepared, joinLlmExecutionTools, limitTotalUsage, pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, prepareKnowledgeFromMarkdown, prepareKnowledgePieces, preparePersona, preparePipeline, prepareTemplates, prettifyPipelineString, stringifyPipelineJson, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline };
 //# sourceMappingURL=index.es.js.map