@promptbook/node 0.66.0-8 → 0.66.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +202 -1701
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/anthropic-claude.index.d.ts +0 -2
- package/esm/typings/src/_packages/cli.index.d.ts +6 -0
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/types.index.d.ts +22 -14
- package/esm/typings/src/_packages/utils.index.d.ts +7 -7
- package/esm/typings/src/config.d.ts +6 -0
- package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/$llmToolsMetadataRegister.d.ts +2 -2
- package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +2 -2
- package/esm/typings/src/llm-providers/_common/$registeredLlmToolsMessage.d.ts +9 -0
- package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/LlmToolsMetadata.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/createLlmToolsFromConfigurationFromEnv.d.ts +1 -0
- package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +1 -0
- package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +1 -0
- package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +1 -0
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
- package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Error.d.ts → PromptbookServer_Error.d.ts} +1 -1
- package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Request.d.ts +34 -0
- package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_ListModels_Response.d.ts +15 -0
- package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Progress.d.ts → PromptbookServer_Prompt_Progress.d.ts} +1 -1
- package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Request.d.ts → PromptbookServer_Prompt_Request.d.ts} +15 -3
- package/esm/typings/src/llm-providers/remote/interfaces/{Promptbook_Server_Response.d.ts → PromptbookServer_Prompt_Response.d.ts} +1 -1
- package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +1 -7
- package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
- package/esm/typings/src/utils/{Register.d.ts → $Register.d.ts} +6 -2
- package/esm/typings/src/utils/environment/{getGlobalScope.d.ts → $getGlobalScope.d.ts} +1 -1
- package/esm/typings/src/utils/organization/f.d.ts +6 -0
- package/package.json +2 -6
- package/umd/index.umd.js +206 -1703
- package/umd/index.umd.js.map +1 -1
- package/esm/typings/src/llm-providers/_common/config.d.ts +0 -14
- package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +0 -4
- /package/esm/typings/src/llm-providers/mocked/{fakeTextToExpectations.d.ts → $fakeTextToExpectations.d.ts} +0 -0
- /package/esm/typings/src/utils/{currentDate.d.ts → $currentDate.d.ts} +0 -0
- /package/esm/typings/src/utils/environment/{isRunningInBrowser.d.ts → $isRunningInBrowser.d.ts} +0 -0
- /package/esm/typings/src/utils/environment/{isRunningInNode.d.ts → $isRunningInNode.d.ts} +0 -0
- /package/esm/typings/src/utils/environment/{isRunningInWebWorker.d.ts → $isRunningInWebWorker.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{isDirectoryExisting.d.ts → $isDirectoryExisting.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{isDirectoryExisting.test.d.ts → $isDirectoryExisting.test.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{isFileExisting.d.ts → $isFileExisting.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{isFileExisting.test.d.ts → $isFileExisting.test.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{listAllFiles.d.ts → $listAllFiles.d.ts} +0 -0
- /package/esm/typings/src/utils/files/{listAllFiles.test.d.ts → $listAllFiles.test.d.ts} +0 -0
- /package/esm/typings/src/utils/random/{randomSeed.d.ts → $randomSeed.d.ts} +0 -0
package/esm/index.es.js
CHANGED
|
@@ -8,16 +8,12 @@ import hexEncoder from 'crypto-js/enc-hex';
 import sha256 from 'crypto-js/sha256';
 import { join } from 'path/posix';
 import * as dotenv from 'dotenv';
-import { io } from 'socket.io-client';
-import Anthropic from '@anthropic-ai/sdk';
-import { OpenAIClient, AzureKeyCredential } from '@azure/openai';
-import OpenAI from 'openai';
 
 // ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.66.0-
+var PROMPTBOOK_VERSION = '0.66.0-9';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /*! *****************************************************************************
@@ -185,6 +181,26 @@ function deepFreezeWithSameType(objectValue) {
  * TODO: [🧠] Is there a way how to meaningfully test this utility
  */
 
+/**
+ * Returns the same value that is passed as argument.
+ * No side effects.
+ *
+ * Note: It can be usefull for:
+ *
+ * 1) Leveling indentation
+ * 2) Putting always-true or always-false conditions without getting eslint errors
+ *
+ * @param value any values
+ * @returns the same values
+ * @private within the repository
+ */
+function just(value) {
+    if (value === undefined) {
+        return undefined;
+    }
+    return value;
+}
+
 // <- TODO: [🧠] Better system for generator warnings - not always "code" and "by `@promptbook/cli`"
 /**
  * The maximum number of iterations for a loops
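Note: in source form the new `just()` helper is an identity function; a minimal TypeScript sketch (the generic signature is an assumption, the `just(false)` idiom appears later in this diff):

    function just<T>(value: T): T {
        // returns its argument unchanged, no side effects
        return value;
    }

    // e.g. keeping an intentionally always-false branch without eslint complaints:
    if (just(false)) {
        /* never taken */
    }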
@@ -249,6 +265,13 @@ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
  * @private within the repository
  */
 var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
+// <- TODO: [🧜♂️]
+/**
+ * @@@
+ *
+ * @public exported from `@promptbook/core`
+ */
+var IS_VERBOSE = false;
 /**
  * TODO: [🧠][🧜♂️] Maybe join remoteUrl and path into single value
  */
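Note: the new `IS_VERBOSE` constant becomes the default for the `isVerbose` option in several hunks below; roughly, in source form (a sketch, not the verbatim original):

    const { isVerbose = IS_VERBOSE } = options; // hypothetical destructuring with the shared default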
@@ -696,7 +719,7 @@ function forEachAsync(array, options, callbackfunction) {
|
|
|
696
719
|
});
|
|
697
720
|
}
|
|
698
721
|
|
|
699
|
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-
|
|
722
|
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
|
|
700
723
|
|
|
701
724
|
/**
|
|
702
725
|
* This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
|
|
@@ -1924,8 +1947,37 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
      */
     MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
         return __awaiter(this, void 0, void 0, function () {
-
-
+            var _a, _b, llmExecutionTools, e_1_1;
+            var e_1, _c;
+            return __generator(this, function (_d) {
+                switch (_d.label) {
+                    case 0:
+                        _d.trys.push([0, 5, 6, 7]);
+                        _a = __values(this.llmExecutionTools), _b = _a.next();
+                        _d.label = 1;
+                    case 1:
+                        if (!!_b.done) return [3 /*break*/, 4];
+                        llmExecutionTools = _b.value;
+                        return [4 /*yield*/, llmExecutionTools.checkConfiguration()];
+                    case 2:
+                        _d.sent();
+                        _d.label = 3;
+                    case 3:
+                        _b = _a.next();
+                        return [3 /*break*/, 1];
+                    case 4: return [3 /*break*/, 7];
+                    case 5:
+                        e_1_1 = _d.sent();
+                        e_1 = { error: e_1_1 };
+                        return [3 /*break*/, 7];
+                    case 6:
+                        try {
+                            if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+                        }
+                        finally { if (e_1) throw e_1.error; }
+                        return [7 /*endfinally*/];
+                    case 7: return [2 /*return*/];
+                }
             });
         });
     };
@@ -1935,8 +1987,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
      */
     MultipleLlmExecutionTools.prototype.listModels = function () {
         return __awaiter(this, void 0, void 0, function () {
-            var availableModels, _a, _b, llmExecutionTools, models,
-            var
+            var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
+            var e_2, _c;
             return __generator(this, function (_d) {
                 switch (_d.label) {
                     case 0:
@@ -1959,14 +2011,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
                         return [3 /*break*/, 2];
                     case 5: return [3 /*break*/, 8];
                     case 6:
-
-
+                        e_2_1 = _d.sent();
+                        e_2 = { error: e_2_1 };
                         return [3 /*break*/, 8];
                     case 7:
                         try {
                             if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
                         }
-                        finally { if (
+                        finally { if (e_2) throw e_2.error; }
                         return [7 /*endfinally*/];
                     case 8: return [2 /*return*/, availableModels];
                 }
@@ -1999,8 +2051,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
      */
     MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var errors, _a, _b, llmExecutionTools, _c, error_1,
-            var
+            var errors, _a, _b, llmExecutionTools, _c, error_1, e_3_1;
+            var e_3, _d;
             var _this = this;
             return __generator(this, function (_e) {
                 switch (_e.label) {
@@ -2056,14 +2108,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
                         return [3 /*break*/, 2];
                     case 14: return [3 /*break*/, 17];
                     case 15:
-
-
+                        e_3_1 = _e.sent();
+                        e_3 = { error: e_3_1 };
                        return [3 /*break*/, 17];
                     case 16:
                         try {
                             if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
                         }
-                        finally { if (
+                        finally { if (e_3) throw e_3.error; }
                         return [7 /*endfinally*/];
                     case 17:
                         if (errors.length === 1) {
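Note: the regenerated `__generator` state machines above correspond to plain `for...of` loops with `await` in the source; roughly, as a TypeScript sketch (not the verbatim original):

    public async checkConfiguration(): Promise<void> {
        // delegate the configuration check to every wrapped LLM execution tool in turn
        for (const llmExecutionTools of this.llmExecutionTools) {
            await llmExecutionTools.checkConfiguration();
        }
    }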
@@ -2524,7 +2576,7 @@ function checkExpectations(expectations, value) {
 function createPipelineExecutor(options) {
     var _this = this;
     var pipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
-    var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ?
+    var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? IS_VERBOSE : _d, _e = settings.isNotPreparedWarningSupressed, isNotPreparedWarningSupressed = _e === void 0 ? false : _e;
     validatePipeline(pipeline);
     var llmTools = joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(arrayableToArray(tools.llm)), false));
     var preparedPipeline;
@@ -3319,7 +3371,7 @@ function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?mayb
         return __generator(this, function (_j) {
             switch (_j.label) {
                 case 0:
-                    llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ?
+                    llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? IS_VERBOSE : _b;
                    TODO_USE(maxParallelCount); // <- [🪂]
                     collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
                     _c = createPipelineExecutor;
@@ -3606,7 +3658,7 @@ function preparePersona(personaDescription, options) {
         return __generator(this, function (_d) {
             switch (_d.label) {
                 case 0:
-                    llmTools = options.llmTools, _a = options.isVerbose, isVerbose = _a === void 0 ?
+                    llmTools = options.llmTools, _a = options.isVerbose, isVerbose = _a === void 0 ? IS_VERBOSE : _a;
                     collection = createCollectionFromJson.apply(void 0, __spreadArray([], __read(PipelineCollection), false));
                     _b = createPipelineExecutor;
                     _c = {};
@@ -3763,7 +3815,7 @@ function preparePipeline(pipeline, options) {
                     if (isPipelinePrepared(pipeline)) {
                         return [2 /*return*/, pipeline];
                     }
-                    llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ?
+                    llmTools = options.llmTools, _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a, _b = options.isVerbose, isVerbose = _b === void 0 ? IS_VERBOSE : _b;
                     parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
                     llmToolsWithUsage = countTotalUsage(llmTools);
                     currentPreparation = {
@@ -6128,7 +6180,7 @@ function createCollectionFromDirectory(path, options) {
                         // TODO: !! Implement;
                         // TODO: [🌗]
                     }
-                    _a = options || {}, _b = _a.isRecursive, isRecursive = _b === void 0 ? true : _b, _c = _a.isVerbose, isVerbose = _c === void 0 ?
+                    _a = options || {}, _b = _a.isRecursive, isRecursive = _b === void 0 ? true : _b, _c = _a.isVerbose, isVerbose = _c === void 0 ? IS_VERBOSE : _c, _d = _a.isLazyLoaded, isLazyLoaded = _d === void 0 ? false : _d, _e = _a.isCrashedOnError, isCrashedOnError = _e === void 0 ? true : _e;
                     collection = createCollectionFromPromise(function () { return __awaiter(_this, void 0, void 0, function () {
                         var fileNames, collection, _loop_1, fileNames_1, fileNames_1_1, fileName, e_1_1;
                         var e_1, _a;
@@ -6306,7 +6358,7 @@ var EnvironmentMismatchError = /** @class */ (function (_super) {
 /**
  * @@@
  *
- * Note: `$` is used to indicate that this function is not a pure function - it access global
+ * Note: `$` is used to indicate that this function is not a pure function - it access global scope
 *
  * @public exported from `@promptbook/utils`
  */
@@ -6320,42 +6372,42 @@ function $getGlobalScope() {
 /**
  * Register is @@@
  *
+ * Note: `$` is used to indicate that this function is not a pure function - it accesses and adds variables in global scope.
+ *
  * @private internal utility, exported are only signleton instances of this class
  */
-var Register = /** @class */ (function () {
-    function Register(
-    this.
+var $Register = /** @class */ (function () {
+    function $Register(storageName) {
+        this.storageName = storageName;
+        storageName = "_promptbook_".concat(storageName);
+        var globalScope = $getGlobalScope();
+        if (globalScope[storageName] === undefined) {
+            globalScope[storageName] = [];
+        }
+        else if (!Array.isArray(globalScope[storageName])) {
+            throw new UnexpectedError("Expected (global) ".concat(storageName, " to be an array, but got ").concat(typeof globalScope[storageName]));
+        }
+        this.storage = globalScope[storageName];
     }
-    Register.prototype.list = function () {
+    $Register.prototype.list = function () {
         // <- TODO: ReadonlyDeep<Array<TRegistered>>
         return this.storage;
     };
-    Register.prototype.register = function (registered) {
+    $Register.prototype.register = function (registered) {
         // <- TODO: What to return here
         var packageName = registered.packageName, className = registered.className;
         var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
         var existingRegistration = this.storage[existingRegistrationIndex];
         if (!existingRegistration) {
-            console.warn("[\uD83D\uDCE6] Registering ".concat(packageName, ".").concat(className, " again"));
             this.storage.push(registered);
         }
         else {
-            console.warn("[\uD83D\uDCE6] Re-registering ".concat(packageName, ".").concat(className, " again"));
             this.storage[existingRegistrationIndex] = registered;
         }
     };
-    return Register;
+    return $Register;
 }());
 
-// TODO: !!!!!! Move this logic to Register and rename to $Register
-var globalScope = $getGlobalScope();
-if (globalScope.$llmToolsMetadataRegister === undefined) {
-    globalScope.$llmToolsMetadataRegister = [];
-}
-else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
-    throw new UnexpectedError("Expected $llmToolsMetadataRegister to be an array, but got ".concat(typeof globalScope.$llmToolsMetadataRegister));
-}
-var _ = globalScope.$llmToolsMetadataRegister;
 /**
  * @@@
  *
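Note: the renamed `$Register` class now keeps its entries in a name-spaced array on the global scope instead of the previous hard-coded `$llmToolsMetadataRegister` global; a rough TypeScript sketch of the same behavior (generic constraint assumed, the array-type guard and `UnexpectedError` are omitted):

    class $Register<TRegistered extends { packageName: string; className: string }> {
        private readonly storage: Array<TRegistered>;

        public constructor(storageName: string) {
            const key = `_promptbook_${storageName}`;
            const globalScope = $getGlobalScope() as Record<string, Array<TRegistered>>;
            globalScope[key] = globalScope[key] ?? []; // shared across all copies of the library in one runtime
            this.storage = globalScope[key];
        }

        public list(): Array<TRegistered> {
            return this.storage;
        }

        public register(registered: TRegistered): void {
            const index = this.storage.findIndex(
                ({ packageName, className }) =>
                    packageName === registered.packageName && className === registered.className,
            );
            if (index === -1) {
                this.storage.push(registered); // first registration
            } else {
                this.storage[index] = registered; // re-registration replaces the previous entry
            }
        }
    }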
@@ -6363,8 +6415,7 @@ var _ = globalScope.$llmToolsMetadataRegister;
 * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
-var $llmToolsMetadataRegister = new Register(
-$getGlobalScope().$llmToolsMetadataRegister;
+var $llmToolsMetadataRegister = new $Register('llm_tools_metadata');
 
 /**
  * @@@
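Note: each provider package presumably registers its constructor into these registers; a hypothetical call, mirroring the new `register-constructor.d.ts` files in the file list above (the constructor name is an assumption):

    $llmToolsRegister.register(createOpenAiExecutionTools);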
@@ -6390,6 +6441,7 @@ function createLlmToolsFromConfigurationFromEnv() {
     return llmToolsConfiguration;
 }
 /**
+ * TODO: [🧠][🪁] Maybe do allow to do auto-install if package not registered and not found
  * TODO: Add Azure OpenAI
  * TODO: [🧠][🍛]
  * TODO: [🧠] Is there some meaningfull way how to test this util
@@ -6400,1682 +6452,131 @@ function createLlmToolsFromConfigurationFromEnv() {
|
|
|
6400
6452
|
*/
|
|
6401
6453
|
|
|
6402
6454
|
/**
|
|
6403
|
-
*
|
|
6404
|
-
*
|
|
6405
|
-
* You can simply use `RemoteExecutionTools` on client-side javascript and connect to your remote server.
|
|
6406
|
-
* This is useful to make all logic on browser side but not expose your API keys or no need to use customer's GPU.
|
|
6407
|
-
*
|
|
6408
|
-
* @see https://github.com/webgptorg/promptbook#remote-server
|
|
6409
|
-
* @public exported from `@promptbook/remote-client`
|
|
6410
|
-
*/
|
|
6411
|
-
var RemoteLlmExecutionTools = /** @class */ (function () {
|
|
6412
|
-
function RemoteLlmExecutionTools(options) {
|
|
6413
|
-
this.options = options;
|
|
6414
|
-
}
|
|
6415
|
-
Object.defineProperty(RemoteLlmExecutionTools.prototype, "title", {
|
|
6416
|
-
get: function () {
|
|
6417
|
-
// TODO: [🧠] Maybe fetch title+description from the remote server (as well as if model methods are defined)
|
|
6418
|
-
return 'Remote server';
|
|
6419
|
-
},
|
|
6420
|
-
enumerable: false,
|
|
6421
|
-
configurable: true
|
|
6422
|
-
});
|
|
6423
|
-
Object.defineProperty(RemoteLlmExecutionTools.prototype, "description", {
|
|
6424
|
-
get: function () {
|
|
6425
|
-
return 'Use all models by your remote server';
|
|
6426
|
-
},
|
|
6427
|
-
enumerable: false,
|
|
6428
|
-
configurable: true
|
|
6429
|
-
});
|
|
6430
|
-
/**
|
|
6431
|
-
* Check the configuration of all execution tools
|
|
6432
|
-
*/
|
|
6433
|
-
RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
|
|
6434
|
-
return __awaiter(this, void 0, void 0, function () {
|
|
6435
|
-
return __generator(this, function (_a) {
|
|
6436
|
-
return [2 /*return*/];
|
|
6437
|
-
});
|
|
6438
|
-
});
|
|
6439
|
-
};
|
|
6440
|
-
/**
|
|
6441
|
-
* List all available models that can be used
|
|
6442
|
-
*/
|
|
6443
|
-
RemoteLlmExecutionTools.prototype.listModels = function () {
|
|
6444
|
-
return __awaiter(this, void 0, void 0, function () {
|
|
6445
|
-
return __generator(this, function (_a) {
|
|
6446
|
-
return [2 /*return*/, (this.options.models ||
|
|
6447
|
-
[
|
|
6448
|
-
/* !!!!!! */
|
|
6449
|
-
])];
|
|
6450
|
-
});
|
|
6451
|
-
});
|
|
6452
|
-
};
|
|
6453
|
-
/**
|
|
6454
|
-
* Creates a connection to the remote proxy server.
|
|
6455
|
-
*/
|
|
6456
|
-
RemoteLlmExecutionTools.prototype.makeConnection = function () {
|
|
6457
|
-
var _this = this;
|
|
6458
|
-
return new Promise(
|
|
6459
|
-
// <- TODO: [🧱] Implement in a functional (not new Class) way
|
|
6460
|
-
function (resolve, reject) {
|
|
6461
|
-
var socket = io(_this.options.remoteUrl, {
|
|
6462
|
-
path: _this.options.path,
|
|
6463
|
-
// path: `${this.remoteUrl.pathname}/socket.io`,
|
|
6464
|
-
transports: [/*'websocket', <- TODO: [🌬] Make websocket transport work */ 'polling'],
|
|
6465
|
-
});
|
|
6466
|
-
// console.log('Connecting to', this.options.remoteUrl.href, { socket });
|
|
6467
|
-
socket.on('connect', function () {
|
|
6468
|
-
resolve(socket);
|
|
6469
|
-
});
|
|
6470
|
-
// TODO: !!!! Better timeout handling
|
|
6471
|
-
setTimeout(function () {
|
|
6472
|
-
reject(new Error("Timeout while connecting to ".concat(_this.options.remoteUrl)));
|
|
6473
|
-
}, 1000 /* <- TODO: Timeout to config */);
|
|
6474
|
-
});
|
|
6475
|
-
};
|
|
6476
|
-
/**
|
|
6477
|
-
* Calls remote proxy server to use a chat model
|
|
6478
|
-
*/
|
|
6479
|
-
RemoteLlmExecutionTools.prototype.callChatModel = function (prompt) {
|
|
6480
|
-
if (this.options.isVerbose) {
|
|
6481
|
-
console.info("\uD83D\uDD8B Remote callChatModel call");
|
|
6482
|
-
}
|
|
6483
|
-
return /* not await */ this.callCommonModel(prompt);
|
|
6484
|
-
};
|
|
6485
|
-
/**
|
|
6486
|
-
* Calls remote proxy server to use a completion model
|
|
6487
|
-
*/
|
|
6488
|
-
RemoteLlmExecutionTools.prototype.callCompletionModel = function (prompt) {
|
|
6489
|
-
if (this.options.isVerbose) {
|
|
6490
|
-
console.info("\uD83D\uDCAC Remote callCompletionModel call");
|
|
6491
|
-
}
|
|
6492
|
-
return /* not await */ this.callCommonModel(prompt);
|
|
6493
|
-
};
|
|
6494
|
-
/**
|
|
6495
|
-
* Calls remote proxy server to use a embedding model
|
|
6496
|
-
*/
|
|
6497
|
-
RemoteLlmExecutionTools.prototype.callEmbeddingModel = function (prompt) {
|
|
6498
|
-
if (this.options.isVerbose) {
|
|
6499
|
-
console.info("\uD83D\uDCAC Remote callEmbeddingModel call");
|
|
6500
|
-
}
|
|
6501
|
-
return /* not await */ this.callCommonModel(prompt);
|
|
6502
|
-
};
|
|
6503
|
-
// <- Note: [🤖] callXxxModel
|
|
6504
|
-
/**
|
|
6505
|
-
* Calls remote proxy server to use both completion or chat model
|
|
6506
|
-
*/
|
|
6507
|
-
RemoteLlmExecutionTools.prototype.callCommonModel = function (prompt) {
|
|
6508
|
-
return __awaiter(this, void 0, void 0, function () {
|
|
6509
|
-
var socket, promptResult;
|
|
6510
|
-
return __generator(this, function (_a) {
|
|
6511
|
-
switch (_a.label) {
|
|
6512
|
-
case 0: return [4 /*yield*/, this.makeConnection()];
|
|
6513
|
-
case 1:
|
|
6514
|
-
socket = _a.sent();
|
|
6515
|
-
if (this.options.isAnonymous) {
|
|
6516
|
-
socket.emit('request', {
|
|
6517
|
-
llmToolsConfiguration: this.options.llmToolsConfiguration,
|
|
6518
|
-
prompt: prompt,
|
|
6519
|
-
// <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
|
|
6520
|
-
});
|
|
6521
|
-
}
|
|
6522
|
-
else {
|
|
6523
|
-
socket.emit('request', {
|
|
6524
|
-
clientId: this.options.clientId,
|
|
6525
|
-
prompt: prompt,
|
|
6526
|
-
// <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
|
|
6527
|
-
});
|
|
6528
|
-
}
|
|
6529
|
-
return [4 /*yield*/, new Promise(function (resolve, reject) {
|
|
6530
|
-
socket.on('response', function (response) {
|
|
6531
|
-
resolve(response.promptResult);
|
|
6532
|
-
socket.disconnect();
|
|
6533
|
-
});
|
|
6534
|
-
socket.on('error', function (error) {
|
|
6535
|
-
reject(new PipelineExecutionError(error.errorMessage));
|
|
6536
|
-
socket.disconnect();
|
|
6537
|
-
});
|
|
6538
|
-
})];
|
|
6539
|
-
case 2:
|
|
6540
|
-
promptResult = _a.sent();
|
|
6541
|
-
socket.disconnect();
|
|
6542
|
-
return [2 /*return*/, promptResult];
|
|
6543
|
-
}
|
|
6544
|
-
});
|
|
6545
|
-
});
|
|
6546
|
-
};
|
|
6547
|
-
return RemoteLlmExecutionTools;
|
|
6548
|
-
}());
|
|
6549
|
-
/**
|
|
6550
|
-
* TODO: [🍓] Allow to list compatible models with each variant
|
|
6551
|
-
* TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
|
|
6552
|
-
* TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
|
|
6553
|
-
* TODO: [🧠] Maybe remove `@promptbook/remote-client` and just use `@promptbook/core`
|
|
6554
|
-
*/
|
|
6555
|
-
|
|
6556
|
-
/**
|
|
6557
|
-
* Function computeUsage will create price per one token based on the string value found on openai page
|
|
6558
|
-
*
|
|
6559
|
-
* @private within the repository, used only as internal helper for `OPENAI_MODELS`
|
|
6560
|
-
*/
|
|
6561
|
-
function computeUsage(value) {
|
|
6562
|
-
var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
|
|
6563
|
-
return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
|
|
6564
|
-
}
|
|
6565
|
-
|
|
6566
|
-
/**
|
|
6567
|
-
* List of available Anthropic Claude models with pricing
|
|
6568
|
-
*
|
|
6569
|
-
* Note: Done at 2024-08-16
|
|
6570
|
-
*
|
|
6571
|
-
* @see https://docs.anthropic.com/en/docs/models-overview
|
|
6572
|
-
* @public exported from `@promptbook/anthropic-claude`
|
|
6573
|
-
*/
|
|
6574
|
-
var ANTHROPIC_CLAUDE_MODELS = [
|
|
6575
|
-
{
|
|
6576
|
-
modelVariant: 'CHAT',
|
|
6577
|
-
modelTitle: 'Claude 3.5 Sonnet',
|
|
6578
|
-
modelName: 'claude-3-5-sonnet-20240620',
|
|
6579
|
-
pricing: {
|
|
6580
|
-
prompt: computeUsage("$3.00 / 1M tokens"),
|
|
6581
|
-
output: computeUsage("$15.00 / 1M tokens"),
|
|
6582
|
-
},
|
|
6583
|
-
},
|
|
6584
|
-
{
|
|
6585
|
-
modelVariant: 'CHAT',
|
|
6586
|
-
modelTitle: 'Claude 3 Opus',
|
|
6587
|
-
modelName: 'claude-3-opus-20240229',
|
|
6588
|
-
pricing: {
|
|
6589
|
-
prompt: computeUsage("$15.00 / 1M tokens"),
|
|
6590
|
-
output: computeUsage("$75.00 / 1M tokens"),
|
|
6591
|
-
},
|
|
6592
|
-
},
|
|
6593
|
-
{
|
|
6594
|
-
modelVariant: 'CHAT',
|
|
6595
|
-
modelTitle: 'Claude 3 Sonnet',
|
|
6596
|
-
modelName: 'claude-3-sonnet-20240229',
|
|
6597
|
-
pricing: {
|
|
6598
|
-
prompt: computeUsage("$3.00 / 1M tokens"),
|
|
6599
|
-
output: computeUsage("$15.00 / 1M tokens"),
|
|
6600
|
-
},
|
|
6601
|
-
},
|
|
6602
|
-
{
|
|
6603
|
-
modelVariant: 'CHAT',
|
|
6604
|
-
modelTitle: 'Claude 3 Haiku',
|
|
6605
|
-
modelName: ' claude-3-haiku-20240307',
|
|
6606
|
-
pricing: {
|
|
6607
|
-
prompt: computeUsage("$0.25 / 1M tokens"),
|
|
6608
|
-
output: computeUsage("$1.25 / 1M tokens"),
|
|
6609
|
-
},
|
|
6610
|
-
},
|
|
6611
|
-
{
|
|
6612
|
-
modelVariant: 'CHAT',
|
|
6613
|
-
modelTitle: 'Claude 2.1',
|
|
6614
|
-
modelName: 'claude-2.1',
|
|
6615
|
-
pricing: {
|
|
6616
|
-
prompt: computeUsage("$8.00 / 1M tokens"),
|
|
6617
|
-
output: computeUsage("$24.00 / 1M tokens"),
|
|
6618
|
-
},
|
|
6619
|
-
},
|
|
6620
|
-
{
|
|
6621
|
-
modelVariant: 'CHAT',
|
|
6622
|
-
modelTitle: 'Claude 2',
|
|
6623
|
-
modelName: 'claude-2.0',
|
|
6624
|
-
pricing: {
|
|
6625
|
-
prompt: computeUsage("$8.00 / 1M tokens"),
|
|
6626
|
-
output: computeUsage("$24.00 / 1M tokens"),
|
|
6627
|
-
},
|
|
6628
|
-
},
|
|
6629
|
-
{
|
|
6630
|
-
modelVariant: 'CHAT',
|
|
6631
|
-
modelTitle: ' Claude Instant 1.2',
|
|
6632
|
-
modelName: 'claude-instant-1.2',
|
|
6633
|
-
pricing: {
|
|
6634
|
-
prompt: computeUsage("$0.80 / 1M tokens"),
|
|
6635
|
-
output: computeUsage("$2.40 / 1M tokens"),
|
|
6636
|
-
},
|
|
6637
|
-
},
|
|
6638
|
-
// TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
|
|
6639
|
-
];
|
|
6640
|
-
/**
|
|
6641
|
-
* Note: [🤖] Add models of new variant
|
|
6642
|
-
* TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
|
|
6643
|
-
* TODO: [🧠] Some mechanism to propagate unsureness
|
|
6644
|
-
* TODO: [🧠][👮♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
|
|
6645
|
-
* TODO: [🎰] Some mechanism to auto-update available models
|
|
6646
|
-
*/
|
|
6647
|
-
|
|
6648
|
-
/**
|
|
6649
|
-
* Get current date in ISO 8601 format
|
|
6455
|
+
* @@@
|
|
6650
6456
|
*
|
|
6651
|
-
*
|
|
6457
|
+
* Note: `$` is used to indicate that this interacts with the global scope
|
|
6458
|
+
* @singleton Only one instance of each register is created per build, but thare can be more @@@
|
|
6459
|
+
* @public exported from `@promptbook/core`
|
|
6652
6460
|
*/
|
|
6653
|
-
|
|
6654
|
-
return new Date().toISOString();
|
|
6655
|
-
}
|
|
6461
|
+
var $llmToolsRegister = new $Register('llm_execution_tools_constructors');
|
|
6656
6462
|
|
|
6657
6463
|
/**
|
|
6658
|
-
*
|
|
6464
|
+
* Creates a message with all registered LLM tools
|
|
6659
6465
|
*
|
|
6660
|
-
*
|
|
6661
|
-
* @returns part of PromptResultUsageCounts
|
|
6466
|
+
* Note: This function is used to create a (error) message when there is no constructor for some LLM provider
|
|
6662
6467
|
*
|
|
6663
|
-
* @private internal
|
|
6468
|
+
* @private internal function of `createLlmToolsFromConfiguration` and `createLlmToolsFromEnv`
|
|
6664
6469
|
*/
|
|
6665
|
-
function
|
|
6666
|
-
|
|
6667
|
-
|
|
6668
|
-
|
|
6669
|
-
|
|
6670
|
-
|
|
6671
|
-
|
|
6672
|
-
|
|
6470
|
+
function $registeredLlmToolsMessage() {
|
|
6471
|
+
var e_1, _a, e_2, _b;
|
|
6472
|
+
/**
|
|
6473
|
+
* Mixes registered LLM tools from $llmToolsMetadataRegister and $llmToolsRegister
|
|
6474
|
+
*/
|
|
6475
|
+
var all = [];
|
|
6476
|
+
var _loop_1 = function (packageName, className) {
|
|
6477
|
+
if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
|
|
6478
|
+
return "continue";
|
|
6479
|
+
}
|
|
6480
|
+
all.push({ packageName: packageName, className: className });
|
|
6673
6481
|
};
|
|
6674
|
-
|
|
6675
|
-
|
|
6676
|
-
|
|
6677
|
-
|
|
6678
|
-
|
|
6679
|
-
* @param value
|
|
6680
|
-
*
|
|
6681
|
-
* @private utility for initializating UncertainNumber
|
|
6682
|
-
*/
|
|
6683
|
-
function uncertainNumber(value) {
|
|
6684
|
-
if (value === null || value === undefined || Number.isNaN(value)) {
|
|
6685
|
-
return { value: 0, isUncertain: true };
|
|
6686
|
-
}
|
|
6687
|
-
return { value: value };
|
|
6688
|
-
}
|
|
6689
|
-
|
|
6690
|
-
/**
|
|
6691
|
-
* Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
|
|
6692
|
-
*
|
|
6693
|
-
* @param promptContent The content of the prompt
|
|
6694
|
-
* @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
|
|
6695
|
-
* @param rawResponse The raw response from Anthropic Claude API
|
|
6696
|
-
* @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
|
|
6697
|
-
* @private internal utility of `AnthropicClaudeExecutionTools`
|
|
6698
|
-
*/
|
|
6699
|
-
function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
|
|
6700
|
-
resultContent, rawResponse) {
|
|
6701
|
-
var _a, _b;
|
|
6702
|
-
if (rawResponse.usage === undefined) {
|
|
6703
|
-
throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
|
|
6482
|
+
try {
|
|
6483
|
+
for (var _c = __values($llmToolsMetadataRegister.list()), _d = _c.next(); !_d.done; _d = _c.next()) {
|
|
6484
|
+
var _e = _d.value, packageName = _e.packageName, className = _e.className;
|
|
6485
|
+
_loop_1(packageName, className);
|
|
6486
|
+
}
|
|
6704
6487
|
}
|
|
6705
|
-
|
|
6706
|
-
|
|
6488
|
+
catch (e_1_1) { e_1 = { error: e_1_1 }; }
|
|
6489
|
+
finally {
|
|
6490
|
+
try {
|
|
6491
|
+
if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
|
|
6492
|
+
}
|
|
6493
|
+
finally { if (e_1) throw e_1.error; }
|
|
6707
6494
|
}
|
|
6708
|
-
var
|
|
6709
|
-
|
|
6710
|
-
|
|
6711
|
-
|
|
6712
|
-
|
|
6713
|
-
|
|
6495
|
+
var _loop_2 = function (packageName, className) {
|
|
6496
|
+
if (all.some(function (item) { return item.packageName === packageName && item.className === className; })) {
|
|
6497
|
+
return "continue";
|
|
6498
|
+
}
|
|
6499
|
+
all.push({ packageName: packageName, className: className });
|
|
6500
|
+
};
|
|
6501
|
+
try {
|
|
6502
|
+
for (var _f = __values($llmToolsRegister.list()), _g = _f.next(); !_g.done; _g = _f.next()) {
|
|
6503
|
+
var _h = _g.value, packageName = _h.packageName, className = _h.className;
|
|
6504
|
+
_loop_2(packageName, className);
|
|
6505
|
+
}
|
|
6714
6506
|
}
|
|
6715
|
-
|
|
6716
|
-
|
|
6507
|
+
catch (e_2_1) { e_2 = { error: e_2_1 }; }
|
|
6508
|
+
finally {
|
|
6509
|
+
try {
|
|
6510
|
+
if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
|
|
6511
|
+
}
|
|
6512
|
+
finally { if (e_2) throw e_2.error; }
|
|
6717
6513
|
}
|
|
6718
|
-
|
|
6719
|
-
|
|
6720
|
-
|
|
6721
|
-
|
|
6722
|
-
|
|
6514
|
+
var metadata = all.map(function (metadata) {
|
|
6515
|
+
var isMetadataAviailable = $llmToolsMetadataRegister
|
|
6516
|
+
.list()
|
|
6517
|
+
.find(function (_a) {
|
|
6518
|
+
var packageName = _a.packageName, className = _a.className;
|
|
6519
|
+
return metadata.packageName === packageName && metadata.className === className;
|
|
6520
|
+
});
|
|
6521
|
+
var isInstalled = $llmToolsRegister
|
|
6522
|
+
.list()
|
|
6523
|
+
.find(function (_a) {
|
|
6524
|
+
var packageName = _a.packageName, className = _a.className;
|
|
6525
|
+
return metadata.packageName === packageName && metadata.className === className;
|
|
6526
|
+
});
|
|
6527
|
+
return __assign(__assign({}, metadata), { isMetadataAviailable: isMetadataAviailable, isInstalled: isInstalled });
|
|
6528
|
+
});
|
|
6529
|
+
return spaceTrim(function (block) { return "\n Available LLM providers are:\n ".concat(block(metadata
|
|
6530
|
+
.map(function (_a, i) {
|
|
6531
|
+
var packageName = _a.packageName, className = _a.className, isMetadataAviailable = _a.isMetadataAviailable, isInstalled = _a.isInstalled;
|
|
6532
|
+
var more;
|
|
6533
|
+
if (just(false)) {
|
|
6534
|
+
more = '';
|
|
6535
|
+
}
|
|
6536
|
+
else if (!isMetadataAviailable && !isInstalled) {
|
|
6537
|
+
// TODO: [🧠][🪁] Maybe do allow to do auto-install if package not registered and not found
|
|
6538
|
+
more = "(not installed and no metadata, looks like a unexpected behavior)";
|
|
6539
|
+
}
|
|
6540
|
+
else if (isMetadataAviailable && !isInstalled) {
|
|
6541
|
+
// TODO: [�][�]
|
|
6542
|
+
more = "(not installed)";
|
|
6543
|
+
}
|
|
6544
|
+
else if (!isMetadataAviailable && isInstalled) {
|
|
6545
|
+
more = "(no metadata, looks like a unexpected behavior)";
|
|
6546
|
+
}
|
|
6547
|
+
else if (isMetadataAviailable && isInstalled) {
|
|
6548
|
+
more = "(installed)";
|
|
6549
|
+
}
|
|
6550
|
+
else {
|
|
6551
|
+
more = "(unknown state, looks like a unexpected behavior)";
|
|
6552
|
+
}
|
|
6553
|
+
return "".concat(i + 1, ") `").concat(className, "` from `").concat(packageName, "` ").concat(more);
|
|
6554
|
+
})
|
|
6555
|
+
.join('\n')), "\n "); });
|
|
6723
6556
|
}
|
|
6724
|
-
/**
|
|
6725
|
-
* TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
|
|
6726
|
-
*/
|
|
6727
6557
|
|
|
6728
6558
|
/**
|
|
6729
|
-
*
|
|
6559
|
+
* @@@
|
|
6730
6560
|
*
|
|
6731
|
-
*
|
|
6732
|
-
*
|
|
6561
|
+
* Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
|
|
6562
|
+
*
|
|
6563
|
+
* @returns @@@
|
|
6564
|
+
* @public exported from `@promptbook/core`
|
|
6733
6565
|
*/
|
|
6734
|
-
|
|
6735
|
-
|
|
6736
|
-
|
|
6737
|
-
|
|
6738
|
-
|
|
6739
|
-
|
|
6740
|
-
|
|
6741
|
-
|
|
6742
|
-
|
|
6743
|
-
/**
|
|
6744
|
-
* Anthropic Claude API client.
|
|
6745
|
-
*/
|
|
6746
|
-
this.client = null;
|
|
6747
|
-
}
|
|
6748
|
-
Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
|
|
6749
|
-
get: function () {
|
|
6750
|
-
return 'Anthropic Claude';
|
|
6751
|
-
},
|
|
6752
|
-
enumerable: false,
|
|
6753
|
-
configurable: true
|
|
6754
|
-
});
|
|
6755
|
-
Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
|
|
6756
|
-
get: function () {
|
|
6757
|
-
return 'Use all models provided by Anthropic Claude';
|
|
6758
|
-
},
|
|
6759
|
-
enumerable: false,
|
|
6760
|
-
configurable: true
|
|
6761
|
-
});
|
|
6762
|
-
AnthropicClaudeExecutionTools.prototype.getClient = function () {
|
|
6763
|
-
return __awaiter(this, void 0, void 0, function () {
|
|
6764
|
-
var anthropicOptions;
|
|
6765
|
-
return __generator(this, function (_a) {
|
|
6766
|
-
if (this.client === null) {
|
|
6767
|
-
anthropicOptions = __assign({}, this.options);
|
|
6768
|
-
delete anthropicOptions.isVerbose;
|
|
6769
|
-
delete anthropicOptions.isProxied;
|
|
6770
|
-
this.client = new Anthropic(anthropicOptions);
|
|
6771
|
-
}
|
|
6772
|
-
return [2 /*return*/, this.client];
|
|
6773
|
-
});
|
|
6566
|
+
function createLlmToolsFromConfiguration(configuration, options) {
|
|
6567
|
+
if (options === void 0) { options = {}; }
|
|
6568
|
+
var _a = options.isVerbose, isVerbose = _a === void 0 ? IS_VERBOSE : _a;
|
|
6569
|
+
var llmTools = configuration.map(function (llmConfiguration) {
|
|
6570
|
+
var registeredItem = $llmToolsRegister
|
|
6571
|
+
.list()
|
|
6572
|
+
.find(function (_a) {
|
|
6573
|
+
var packageName = _a.packageName, className = _a.className;
|
|
6574
|
+
return llmConfiguration.packageName === packageName && llmConfiguration.className === className;
|
|
6774
6575
|
});
|
|
6775
|
-
|
|
6776
|
-
|
|
6777
|
-
* Check the `options` passed to `constructor`
|
|
6778
|
-
*/
|
|
6779
|
-
AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
|
|
6780
|
-
return __awaiter(this, void 0, void 0, function () {
|
|
6781
|
-
return __generator(this, function (_a) {
|
|
6782
|
-
switch (_a.label) {
|
|
6783
|
-
case 0: return [4 /*yield*/, this.getClient()];
|
|
6784
|
-
case 1:
|
|
6785
|
-
_a.sent();
|
|
6786
|
-
return [2 /*return*/];
|
|
6787
|
-
}
|
|
6788
|
-
});
|
|
6789
|
-
});
|
|
6790
|
-
};
|
|
6791
|
-
/**
|
|
6792
|
-
* List all available Anthropic Claude models that can be used
|
|
6793
|
-
*/
|
|
6794
|
-
AnthropicClaudeExecutionTools.prototype.listModels = function () {
|
|
6795
|
-
return ANTHROPIC_CLAUDE_MODELS;
|
|
6796
|
-
};
|
|
6797
|
-
/**
|
|
6798
|
-
* Calls Anthropic Claude API to use a chat model.
|
|
6799
|
-
*/
|
|
6800
|
-
AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
|
|
6801
|
-
return __awaiter(this, void 0, void 0, function () {
|
|
6802
|
-
var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
|
|
6803
|
-
return __generator(this, function (_a) {
|
|
6804
|
-
switch (_a.label) {
|
|
6805
|
-
case 0:
|
|
6806
|
-
if (this.options.isVerbose) {
|
|
6807
|
-
console.info('💬 Anthropic Claude callChatModel call');
|
|
6808
|
-
}
|
|
6809
|
-
content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
|
|
6810
|
-
return [4 /*yield*/, this.getClient()];
|
|
6811
|
-
case 1:
|
|
6812
|
-
client = _a.sent();
|
|
6813
|
-
// TODO: [☂] Use here more modelRequirements
|
|
6814
|
-
if (modelRequirements.modelVariant !== 'CHAT') {
|
|
6815
|
-
throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
|
|
6816
|
-
}
|
|
6817
|
-
modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
|
|
6818
|
-
rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
|
|
6819
|
-
rawRequest = {
|
|
6820
|
-
model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
|
|
6821
|
-
max_tokens: modelRequirements.maxTokens || 4096,
|
|
6822
|
-
// <- TODO: [🌾] Make some global max cap for maxTokens
|
|
6823
|
-
temperature: modelRequirements.temperature,
|
|
6824
|
-
system: modelRequirements.systemMessage,
|
|
6825
|
-
// <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
|
|
6826
|
-
// <- Note: [🧆]
|
|
6827
|
-
messages: [
|
|
6828
|
-
{
|
|
6829
|
-
role: 'user',
|
|
6830
|
-
content: rawPromptContent,
|
|
6831
|
-
},
|
|
6832
|
-
],
|
|
6833
|
-
// TODO: Is here some equivalent of user identification?> user: this.options.user,
|
|
6834
|
-
};
|
|
6835
|
-
start = getCurrentIsoDate();
|
|
6836
|
-
if (this.options.isVerbose) {
|
|
6837
|
-
console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
|
|
6838
|
-
}
|
|
6839
|
-
return [4 /*yield*/, client.messages.create(rawRequest)];
|
|
6840
|
-
case 2:
|
|
6841
|
-
rawResponse = _a.sent();
|
|
6842
|
-
if (this.options.isVerbose) {
|
|
6843
|
-
console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
|
|
6844
|
-
}
|
|
6845
|
-
if (!rawResponse.content[0]) {
|
|
6846
|
-
throw new PipelineExecutionError('No content from Anthropic Claude');
|
|
6847
|
-
}
|
|
6848
|
-
if (rawResponse.content.length > 1) {
|
|
6849
|
-
throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
|
|
6850
|
-
}
|
|
6851
|
-
contentBlock = rawResponse.content[0];
|
|
6852
|
-
if (contentBlock.type !== 'text') {
|
|
6853
|
-
throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
|
|
6854
|
-
}
|
|
6855
|
-
resultContent = contentBlock.text;
|
|
6856
|
-
// eslint-disable-next-line prefer-const
|
|
6857
|
-
complete = getCurrentIsoDate();
|
|
6858
|
-
usage = computeAnthropicClaudeUsage(content, '', rawResponse);
|
|
6859
|
-
return [2 /*return*/, {
|
|
6860
|
-
content: resultContent,
|
|
6861
|
-
modelName: rawResponse.model,
|
|
6862
|
-
timing: {
|
|
6863
|
-
start: start,
|
|
6864
|
-
complete: complete,
|
|
6865
|
-
},
|
|
6866
|
-
usage: usage,
|
|
6867
|
-
rawPromptContent: rawPromptContent,
|
|
6868
|
-
rawRequest: rawRequest,
|
|
6869
|
-
rawResponse: rawResponse,
|
|
6870
|
-
// <- [🗯]
|
|
6871
|
-
}];
|
|
6872
|
-
}
|
|
6873
|
-
});
|
|
6874
|
-
});
|
|
6875
|
-
};
|
|
6876
|
-
/*
|
|
6877
|
-
TODO: [👏]
|
|
6878
|
-
public async callCompletionModel(
|
|
6879
|
-
prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
|
|
6880
|
-
): Promise<PromptCompletionResult> {
|
|
6881
|
-
|
|
6882
|
-
if (this.options.isVerbose) {
|
|
6883
|
-
console.info('🖋 Anthropic Claude callCompletionModel call');
|
|
6884
|
-
}
|
|
6885
|
-
|
|
6886
|
-
const { content, parameters, modelRequirements } = prompt;
|
|
6887
|
-
|
|
6888
|
-
// TODO: [☂] Use here more modelRequirements
|
|
6889
|
-
if (modelRequirements.modelVariant !== 'COMPLETION') {
|
|
6890
|
-
throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
|
|
6891
|
-
}
|
|
6892
|
-
|
|
6893
|
-
const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
|
|
6894
|
-
const modelSettings = {
|
|
6895
|
-
model: modelName,
|
|
6896
|
-
max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
|
|
6897
|
-
// <- TODO: [🌾] Make some global max cap for maxTokens
|
|
6898
|
-
// <- TODO: Use here `systemMessage`, `temperature` and `seed`
|
|
6899
|
-
};
|
|
6900
|
-
|
|
6901
|
-
const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
|
|
6902
|
-
...modelSettings,
|
|
6903
|
-
prompt: rawPromptContent,
|
|
6904
|
-
user: this.options.user,
|
|
6905
|
-
};
|
|
6906
|
-
const start: string_date_iso8601 = getCurrentIsoDate();
|
|
6907
|
-
let complete: string_date_iso8601;
|
|
6908
|
-
|
|
6909
|
-
if (this.options.isVerbose) {
|
|
6910
|
-
console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
|
|
6911
|
-
}
|
|
6912
|
-
const rawResponse = await this.client.completions.create(rawRequest);
|
|
6913
|
-
if (this.options.isVerbose) {
|
|
6914
|
-
console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
|
|
6915
|
-
}
|
|
6916
|
-
|
|
6917
|
-
if (!rawResponse.choices[0]) {
|
|
6918
|
-
throw new PipelineExecutionError('No choises from Anthropic Claude');
|
|
6919
|
-
}
|
|
6920
|
-
|
|
6921
|
-
if (rawResponse.choices.length > 1) {
|
|
6922
|
-
// TODO: This should be maybe only warning
|
|
6923
|
-
throw new PipelineExecutionError('More than one choise from Anthropic Claude');
|
|
6924
|
-
}
|
|
6925
|
-
|
|
6926
|
-
const resultContent = rawResponse.choices[0].text;
|
|
6927
|
-
// eslint-disable-next-line prefer-const
|
|
6928
|
-
complete = getCurrentIsoDate();
|
|
6929
|
-
const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
|
|
6930
|
-
|
|
6931
|
-
|
|
6932
|
-
|
|
6933
|
-
return {
|
|
6934
|
-
content: resultContent,
|
|
6935
|
-
-            modelName: rawResponse.model || model,
-            timing: {
-                start,
-                complete,
-            },
-            usage,
-            rawResponse,
-            // <- [🗯]
-        };
-    }
-    */
-    // <- Note: [🤖] callXxxModel
-    /**
-     * Get the model that should be used as default
-     */
-    AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
-        var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
-            var modelName = _a.modelName;
-            return modelName.startsWith(defaultModelName);
-        });
-        if (model === undefined) {
-            throw new UnexpectedError(spaceTrim(function (block) {
-                return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
-                    var modelName = _a.modelName;
-                    return "- \"".concat(modelName, "\"");
-                }).join('\n')), "\n\n ");
-            }));
-        }
-        return model;
-    };
-    /**
-     * Default model for chat variant.
-     */
-    AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
-        return this.getDefaultModel('claude-3-opus');
-    };
-    return AnthropicClaudeExecutionTools;
-}());
-/**
- * TODO: [🍆] JSON mode
- * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
- */
-
-/**
- * Execution Tools for calling Anthropic Claude API.
- *
- * @public exported from `@promptbook/anthropic-claude`
- */
-var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
-    if (options.isProxied) {
-        return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
-                {
-                    title: 'Anthropic Claude (proxied)',
-                    packageName: '@promptbook/anthropic-claude',
-                    className: 'AnthropicClaudeExecutionTools',
-                    options: __assign(__assign({}, options), { isProxied: false }),
-                },
-            ], models: ANTHROPIC_CLAUDE_MODELS }));
-    }
-    return new AnthropicClaudeExecutionTools(options);
-}, {
-    packageName: '@promptbook/anthropic-claude',
-    className: 'AnthropicClaudeExecutionTools',
-});
-/**
- * TODO: [🧠] !!!! Make anonymous this with all LLM providers
- * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
- * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
- * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
- */
-
-/**
- * List of available OpenAI models with pricing
- *
- * Note: Done at 2024-05-20
- *
- * @see https://platform.openai.com/docs/models/
- * @see https://openai.com/api/pricing/
- * @public exported from `@promptbook/openai`
- */
-var OPENAI_MODELS = [
-    /*/
-    {
-        modelTitle: 'dall-e-3',
-        modelName: 'dall-e-3',
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'whisper-1',
-        modelName: 'whisper-1',
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'COMPLETION',
-        modelTitle: 'davinci-002',
-        modelName: 'davinci-002',
-        pricing: {
-            prompt: computeUsage("$2.00 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"), // <- not sure
-        },
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'dall-e-2',
-        modelName: 'dall-e-2',
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-16k',
-        modelName: 'gpt-3.5-turbo-16k',
-        pricing: {
-            prompt: computeUsage("$3.00 / 1M tokens"),
-            output: computeUsage("$4.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'tts-1-hd-1106',
-        modelName: 'tts-1-hd-1106',
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'tts-1-hd',
-        modelName: 'tts-1-hd',
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4',
-        modelName: 'gpt-4',
-        pricing: {
-            prompt: computeUsage("$30.00 / 1M tokens"),
-            output: computeUsage("$60.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-32k',
-        modelName: 'gpt-4-32k',
-        pricing: {
-            prompt: computeUsage("$60.00 / 1M tokens"),
-            output: computeUsage("$120.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /*/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-0613',
-        modelName: 'gpt-4-0613',
-        pricing: {
-            prompt: computeUsage(` / 1M tokens`),
-            output: computeUsage(` / 1M tokens`),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-turbo-2024-04-09',
-        modelName: 'gpt-4-turbo-2024-04-09',
-        pricing: {
-            prompt: computeUsage("$10.00 / 1M tokens"),
-            output: computeUsage("$30.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-1106',
-        modelName: 'gpt-3.5-turbo-1106',
-        pricing: {
-            prompt: computeUsage("$1.00 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-turbo',
-        modelName: 'gpt-4-turbo',
-        pricing: {
-            prompt: computeUsage("$10.00 / 1M tokens"),
-            output: computeUsage("$30.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'COMPLETION',
-        modelTitle: 'gpt-3.5-turbo-instruct-0914',
-        modelName: 'gpt-3.5-turbo-instruct-0914',
-        pricing: {
-            prompt: computeUsage("$1.50 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'COMPLETION',
-        modelTitle: 'gpt-3.5-turbo-instruct',
-        modelName: 'gpt-3.5-turbo-instruct',
-        pricing: {
-            prompt: computeUsage("$1.50 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'tts-1',
-        modelName: 'tts-1',
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo',
-        modelName: 'gpt-3.5-turbo',
-        pricing: {
-            prompt: computeUsage("$3.00 / 1M tokens"),
-            output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-0301',
-        modelName: 'gpt-3.5-turbo-0301',
-        pricing: {
-            prompt: computeUsage("$1.50 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'COMPLETION',
-        modelTitle: 'babbage-002',
-        modelName: 'babbage-002',
-        pricing: {
-            prompt: computeUsage("$0.40 / 1M tokens"),
-            output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-1106-preview',
-        modelName: 'gpt-4-1106-preview',
-        pricing: {
-            prompt: computeUsage("$10.00 / 1M tokens"),
-            output: computeUsage("$30.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-0125-preview',
-        modelName: 'gpt-4-0125-preview',
-        pricing: {
-            prompt: computeUsage("$10.00 / 1M tokens"),
-            output: computeUsage("$30.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /*/
-    {
-        modelTitle: 'tts-1-1106',
-        modelName: 'tts-1-1106',
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-0125',
-        modelName: 'gpt-3.5-turbo-0125',
-        pricing: {
-            prompt: computeUsage("$0.50 / 1M tokens"),
-            output: computeUsage("$1.50 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-turbo-preview',
-        modelName: 'gpt-4-turbo-preview',
-        pricing: {
-            prompt: computeUsage("$10.00 / 1M tokens"),
-            output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'EMBEDDING',
-        modelTitle: 'text-embedding-3-large',
-        modelName: 'text-embedding-3-large',
-        pricing: {
-            prompt: computeUsage("$0.13 / 1M tokens"),
-            // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
-            output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'EMBEDDING',
-        modelTitle: 'text-embedding-3-small',
-        modelName: 'text-embedding-3-small',
-        pricing: {
-            prompt: computeUsage("$0.02 / 1M tokens"),
-            // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
-            output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-0613',
-        modelName: 'gpt-3.5-turbo-0613',
-        pricing: {
-            prompt: computeUsage("$1.50 / 1M tokens"),
-            output: computeUsage("$2.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'EMBEDDING',
-        modelTitle: 'text-embedding-ada-002',
-        modelName: 'text-embedding-ada-002',
-        pricing: {
-            prompt: computeUsage("$0.1 / 1M tokens"),
-            // TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
-            output: 0, // <- Note: [🆖] In Embedding models you dont pay for output
-        },
-    },
-    /**/
-    /*/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-1106-vision-preview',
-        modelName: 'gpt-4-1106-vision-preview',
-    },
-    /**/
-    /*/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4-vision-preview',
-        modelName: 'gpt-4-vision-preview',
-        pricing: {
-            prompt: computeUsage(`$10.00 / 1M tokens`),
-            output: computeUsage(`$30.00 / 1M tokens`),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4o-2024-05-13',
-        modelName: 'gpt-4o-2024-05-13',
-        pricing: {
-            prompt: computeUsage("$5.00 / 1M tokens"),
-            output: computeUsage("$15.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-4o',
-        modelName: 'gpt-4o',
-        pricing: {
-            prompt: computeUsage("$5.00 / 1M tokens"),
-            output: computeUsage("$15.00 / 1M tokens"),
-        },
-    },
-    /**/
-    /**/
-    {
-        modelVariant: 'CHAT',
-        modelTitle: 'gpt-3.5-turbo-16k-0613',
-        modelName: 'gpt-3.5-turbo-16k-0613',
-        pricing: {
-            prompt: computeUsage("$3.00 / 1M tokens"),
-            output: computeUsage("$4.00 / 1M tokens"),
-        },
-    },
-    /**/
-];
-/**
- * Note: [🤖] Add models of new variant
- * TODO: [🧠] Some mechanism to propagate unsureness
- * TODO: [🎰] Some mechanism to auto-update available models
- * TODO: [🎰][👮♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
- * TODO: [🧠][👮♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
- * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
- * @see https://openai.com/api/pricing/
- * @see /other/playground/playground.ts
- * TODO: [🍓] Make better
- * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
- * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
- */
-
-/**
- * Execution Tools for calling Azure OpenAI API.
- *
- * @public exported from `@promptbook/azure-openai`
- */
-var AzureOpenAiExecutionTools = /** @class */ (function () {
-    /**
-     * Creates OpenAI Execution Tools.
-     *
-     * @param options which are relevant are directly passed to the OpenAI client
-     */
-    function AzureOpenAiExecutionTools(options) {
-        this.options = options;
-        /**
-         * OpenAI Azure API client.
-         */
-        this.client = null;
-    }
-    Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
-        get: function () {
-            return 'Azure OpenAI';
-        },
-        enumerable: false,
-        configurable: true
-    });
-    Object.defineProperty(AzureOpenAiExecutionTools.prototype, "description", {
-        get: function () {
-            return 'Use all models trained by OpenAI provided by Azure';
-        },
-        enumerable: false,
-        configurable: true
-    });
-    AzureOpenAiExecutionTools.prototype.getClient = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                if (this.client === null) {
-                    this.client = new OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(this.options.apiKey));
-                }
-                return [2 /*return*/, this.client];
-            });
-        });
-    };
-    /**
-     * Check the `options` passed to `constructor`
-     */
-    AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0: return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        _a.sent();
-                        return [2 /*return*/];
-                }
-            });
-        });
-    };
-    /**
-     * List all available Azure OpenAI models that can be used
-     */
-    AzureOpenAiExecutionTools.prototype.listModels = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                // TODO: !!! Do here some filtering which models are really available as deployment
-                // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
-                return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
-                        var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
-                        return ({
-                            modelTitle: "Azure ".concat(modelTitle),
-                            modelName: modelName,
-                            modelVariant: modelVariant,
-                        });
-                    })];
-            });
-        });
-    };
-    /**
-     * Calls OpenAI API to use a chat model.
-     */
-    AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
-        var _a, _b;
-        return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
-            return __generator(this, function (_c) {
-                switch (_c.label) {
-                    case 0:
-                        if (this.options.isVerbose) {
-                            console.info('💬 OpenAI callChatModel call');
-                        }
-                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
-                        return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        client = _c.sent();
-                        // TODO: [☂] Use here more modelRequirements
-                        if (modelRequirements.modelVariant !== 'CHAT') {
-                            throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
-                        }
-                        _c.label = 2;
-                    case 2:
-                        _c.trys.push([2, 4, , 5]);
-                        modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
-                        modelSettings = {
-                            maxTokens: modelRequirements.maxTokens,
-                            // <- TODO: [🌾] Make some global max cap for maxTokens
-                            temperature: modelRequirements.temperature,
-                            user: this.options.user,
-                            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
-                            // <- Note: [🧆]
-                        };
-                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
-                        messages = __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
-                            ? []
-                            : [
-                                {
-                                    role: 'system',
-                                    content: modelRequirements.systemMessage,
-                                },
-                            ])), false), [
-                            {
-                                role: 'user',
-                                content: rawPromptContent,
-                            },
-                        ], false);
-                        start = getCurrentIsoDate();
-                        complete = void 0;
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
-                        }
-                        rawRequest = [modelName, messages, modelSettings];
-                        return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
-                    case 3:
-                        rawResponse = _c.sent();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-                        }
-                        if (!rawResponse.choices[0]) {
-                            throw new PipelineExecutionError('No choises from Azure OpenAI');
-                        }
-                        if (rawResponse.choices.length > 1) {
-                            // TODO: This should be maybe only warning
-                            throw new PipelineExecutionError('More than one choise from Azure OpenAI');
-                        }
-                        if (!rawResponse.choices[0].message || !rawResponse.choices[0].message.content) {
-                            throw new PipelineExecutionError('Empty response from Azure OpenAI');
-                        }
-                        resultContent = rawResponse.choices[0].message.content;
-                        // eslint-disable-next-line prefer-const
-                        complete = getCurrentIsoDate();
-                        usage = {
-                            price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
-                            input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
-                            output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
-                        };
-                        return [2 /*return*/, {
-                                content: resultContent,
-                                modelName: modelName,
-                                timing: {
-                                    start: start,
-                                    complete: complete,
-                                },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
-                            }];
-                    case 4:
-                        error_1 = _c.sent();
-                        throw this.transformAzureError(error_1);
-                    case 5: return [2 /*return*/];
-                }
-            });
-        });
-    };
-    /**
-     * Calls Azure OpenAI API to use a complete model.
-     */
-    AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
-        var _a, _b;
-        return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
-            return __generator(this, function (_c) {
-                switch (_c.label) {
-                    case 0:
-                        if (this.options.isVerbose) {
-                            console.info('🖋 OpenAI callCompletionModel call');
-                        }
-                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
-                        return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        client = _c.sent();
-                        // TODO: [☂] Use here more modelRequirements
-                        if (modelRequirements.modelVariant !== 'COMPLETION') {
-                            throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
-                        }
-                        _c.label = 2;
-                    case 2:
-                        _c.trys.push([2, 4, , 5]);
-                        modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
-                        modelSettings = {
-                            maxTokens: modelRequirements.maxTokens || 2000,
-                            // <- TODO: [🌾] Make some global max cap for maxTokens
-                            temperature: modelRequirements.temperature,
-                            user: this.options.user,
-                            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
-                            // <- Note: [🧆]
-                        };
-                        start = getCurrentIsoDate();
-                        complete = void 0;
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('content'), JSON.stringify(content, null, 4));
-                            console.info(colors.bgWhite('parameters'), JSON.stringify(parameters, null, 4));
-                        }
-                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
-                        rawRequest = [
-                            modelName,
-                            [rawPromptContent],
-                            modelSettings,
-                        ];
-                        return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
-                    case 3:
-                        rawResponse = _c.sent();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-                        }
-                        if (!rawResponse.choices[0]) {
-                            throw new PipelineExecutionError('No choises from OpenAI');
-                        }
-                        if (rawResponse.choices.length > 1) {
-                            // TODO: This should be maybe only warning
-                            throw new PipelineExecutionError('More than one choise from OpenAI');
-                        }
-                        resultContent = rawResponse.choices[0].text;
-                        // eslint-disable-next-line prefer-const
-                        complete = getCurrentIsoDate();
-                        usage = {
-                            price: uncertainNumber() /* <- TODO: [🐞] Compute usage */,
-                            input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
-                            output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
-                        };
-                        return [2 /*return*/, {
-                                content: resultContent,
-                                modelName: modelName,
-                                timing: {
-                                    start: start,
-                                    complete: complete,
-                                },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
-                            }];
-                    case 4:
-                        error_2 = _c.sent();
-                        throw this.transformAzureError(error_2);
-                    case 5: return [2 /*return*/];
-                }
-            });
-        });
-    };
-    // <- Note: [🤖] callXxxModel
-    /**
-     * Changes Azure error (which is not propper Error but object) to propper Error
-     */
-    AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
-        if (typeof azureError !== 'object' || azureError === null) {
-            return new PipelineExecutionError("Unknown Azure OpenAI error");
-        }
-        var code = azureError.code, message = azureError.message;
-        return new PipelineExecutionError("".concat(code, ": ").concat(message));
-    };
-    return AzureOpenAiExecutionTools;
-}());
-/**
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom AzureOpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- */
-
-/**
- * Computes the usage of the OpenAI API based on the response from OpenAI
- *
- * @param promptContent The content of the prompt
- * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
- * @param rawResponse The raw response from OpenAI API
- * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
- * @private internal utility of `OpenAiExecutionTools`
- */
-function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
-resultContent, rawResponse) {
-    var _a, _b;
-    if (rawResponse.usage === undefined) {
-        throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
-    }
-    if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
-        throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
-    }
-    var inputTokens = rawResponse.usage.prompt_tokens;
-    var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
-    var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
-    var price;
-    if (modelInfo === undefined || modelInfo.pricing === undefined) {
-        price = uncertainNumber();
-    }
-    else {
-        price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
-    }
-    return {
-        price: price,
-        input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
-        output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
-    };
-}
-/**
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
- */
-
-/**
- * Execution Tools for calling OpenAI API
- *
- * @public exported from `@promptbook/openai`
- */
-var OpenAiExecutionTools = /** @class */ (function () {
-    /**
-     * Creates OpenAI Execution Tools.
-     *
-     * @param options which are relevant are directly passed to the OpenAI client
-     */
-    function OpenAiExecutionTools(options) {
-        if (options === void 0) { options = {}; }
-        this.options = options;
-        /**
-         * OpenAI API client.
-         */
-        this.client = null;
-    }
-    Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
-        get: function () {
-            return 'OpenAI';
-        },
-        enumerable: false,
-        configurable: true
-    });
-    Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
-        get: function () {
-            return 'Use all models provided by OpenAI';
-        },
-        enumerable: false,
-        configurable: true
-    });
-    OpenAiExecutionTools.prototype.getClient = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            var openAiOptions;
-            return __generator(this, function (_a) {
-                if (this.client === null) {
-                    openAiOptions = __assign({}, this.options);
-                    delete openAiOptions.isVerbose;
-                    delete openAiOptions.user;
-                    this.client = new OpenAI(__assign({}, openAiOptions));
-                }
-                return [2 /*return*/, this.client];
-            });
-        });
-    };
-    /**
-     * Check the `options` passed to `constructor`
-     */
-    OpenAiExecutionTools.prototype.checkConfiguration = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0: return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        _a.sent();
-                        return [2 /*return*/];
-                }
-            });
-        });
-    };
-    /**
-     * List all available OpenAI models that can be used
-     */
-    OpenAiExecutionTools.prototype.listModels = function () {
-        /*
-        Note: Dynamic lising of the models
-        const models = await this.openai.models.list({});
-
-        console.log({ models });
-        console.log(models.data);
-        */
-        return OPENAI_MODELS;
-    };
-    /**
-     * Calls OpenAI API to use a chat model.
-     */
-    OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
-        return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0:
-                        if (this.options.isVerbose) {
-                            console.info('💬 OpenAI callChatModel call', { prompt: prompt });
-                        }
-                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
-                        return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        client = _a.sent();
-                        // TODO: [☂] Use here more modelRequirements
-                        if (modelRequirements.modelVariant !== 'CHAT') {
-                            throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
-                        }
-                        modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
-                        modelSettings = {
-                            model: modelName,
-                            max_tokens: modelRequirements.maxTokens,
-                            // <- TODO: [🌾] Make some global max cap for maxTokens
-                            temperature: modelRequirements.temperature,
-                            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
-                            // <- Note: [🧆]
-                        };
-                        if (expectFormat === 'JSON') {
-                            modelSettings.response_format = {
-                                type: 'json_object',
-                            };
-                        }
-                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
-                        rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
-                                ? []
-                                : [
-                                    {
-                                        role: 'system',
-                                        content: modelRequirements.systemMessage,
-                                    },
-                                ])), false), [
-                                {
-                                    role: 'user',
-                                    content: rawPromptContent,
-                                },
-                            ], false), user: this.options.user });
-                        start = getCurrentIsoDate();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
-                        }
-                        return [4 /*yield*/, client.chat.completions.create(rawRequest)];
-                    case 2:
-                        rawResponse = _a.sent();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-                        }
-                        if (!rawResponse.choices[0]) {
-                            throw new PipelineExecutionError('No choises from OpenAI');
-                        }
-                        if (rawResponse.choices.length > 1) {
-                            // TODO: This should be maybe only warning
-                            throw new PipelineExecutionError('More than one choise from OpenAI');
-                        }
-                        resultContent = rawResponse.choices[0].message.content;
-                        // eslint-disable-next-line prefer-const
-                        complete = getCurrentIsoDate();
-                        usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
-                        if (resultContent === null) {
-                            throw new PipelineExecutionError('No response message from OpenAI');
-                        }
-                        return [2 /*return*/, {
-                                content: resultContent,
-                                modelName: rawResponse.model || modelName,
-                                timing: {
-                                    start: start,
-                                    complete: complete,
-                                },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
-                            }];
-                }
-            });
-        });
-    };
-    /**
-     * Calls OpenAI API to use a complete model.
-     */
-    OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
-        return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0:
-                        if (this.options.isVerbose) {
-                            console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
-                        }
-                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
-                        return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        client = _a.sent();
-                        // TODO: [☂] Use here more modelRequirements
-                        if (modelRequirements.modelVariant !== 'COMPLETION') {
-                            throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
-                        }
-                        modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
-                        modelSettings = {
-                            model: modelName,
-                            max_tokens: modelRequirements.maxTokens || 2000,
-                            // <- TODO: [🌾] Make some global max cap for maxTokens
-                            temperature: modelRequirements.temperature,
-                            // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
-                            // <- Note: [🧆]
-                        };
-                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
-                        rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
-                        start = getCurrentIsoDate();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
-                        }
-                        return [4 /*yield*/, client.completions.create(rawRequest)];
-                    case 2:
-                        rawResponse = _a.sent();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-                        }
-                        if (!rawResponse.choices[0]) {
-                            throw new PipelineExecutionError('No choises from OpenAI');
-                        }
-                        if (rawResponse.choices.length > 1) {
-                            // TODO: This should be maybe only warning
-                            throw new PipelineExecutionError('More than one choise from OpenAI');
-                        }
-                        resultContent = rawResponse.choices[0].text;
-                        // eslint-disable-next-line prefer-const
-                        complete = getCurrentIsoDate();
-                        usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
-                        return [2 /*return*/, {
-                                content: resultContent,
-                                modelName: rawResponse.model || modelName,
-                                timing: {
-                                    start: start,
-                                    complete: complete,
-                                },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
-                            }];
-                }
-            });
-        });
-    };
-    /**
-     * Calls OpenAI API to use a embedding model
-     */
-    OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
-        return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
-            return __generator(this, function (_a) {
-                switch (_a.label) {
-                    case 0:
-                        if (this.options.isVerbose) {
-                            console.info('🖋 OpenAI embedding call', { prompt: prompt });
-                        }
-                        content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
-                        return [4 /*yield*/, this.getClient()];
-                    case 1:
-                        client = _a.sent();
-                        // TODO: [☂] Use here more modelRequirements
-                        if (modelRequirements.modelVariant !== 'EMBEDDING') {
-                            throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
-                        }
-                        modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
-                        rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
-                        rawRequest = {
-                            input: rawPromptContent,
-                            model: modelName,
-                        };
-                        start = getCurrentIsoDate();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
-                        }
-                        return [4 /*yield*/, client.embeddings.create(rawRequest)];
-                    case 2:
-                        rawResponse = _a.sent();
-                        if (this.options.isVerbose) {
-                            console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
-                        }
-                        if (rawResponse.data.length !== 1) {
-                            throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
-                        }
-                        resultContent = rawResponse.data[0].embedding;
-                        // eslint-disable-next-line prefer-const
-                        complete = getCurrentIsoDate();
-                        usage = computeOpenAiUsage(content, '', rawResponse);
-                        return [2 /*return*/, {
-                                content: resultContent,
-                                modelName: rawResponse.model || modelName,
-                                timing: {
-                                    start: start,
-                                    complete: complete,
-                                },
-                                usage: usage,
-                                rawPromptContent: rawPromptContent,
-                                rawRequest: rawRequest,
-                                rawResponse: rawResponse,
-                                // <- [🗯]
-                            }];
-                }
-            });
-        });
-    };
-    // <- Note: [🤖] callXxxModel
-    /**
-     * Get the model that should be used as default
-     */
-    OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
-        var model = OPENAI_MODELS.find(function (_a) {
-            var modelName = _a.modelName;
-            return modelName === defaultModelName;
-        });
-        if (model === undefined) {
-            throw new UnexpectedError(spaceTrim(function (block) {
-                return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
-                    var modelName = _a.modelName;
-                    return "- \"".concat(modelName, "\"");
-                }).join('\n')), "\n\n ");
-            }));
-        }
-        return model;
-    };
-    /**
-     * Default model for chat variant.
-     */
-    OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
-        return this.getDefaultModel('gpt-4o');
-    };
-    /**
-     * Default model for completion variant.
-     */
-    OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
-        return this.getDefaultModel('gpt-3.5-turbo-instruct');
-    };
-    /**
-     * Default model for completion variant.
-     */
-    OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
-        return this.getDefaultModel('text-embedding-3-large');
-    };
-    return OpenAiExecutionTools;
-}());
-/**
- * TODO: [🧠][🧙♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
- * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenAiError
- * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
- * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
- */
-
-/**
- * Execution Tools for calling OpenAI API
- *
- * @public exported from `@promptbook/openai`
- */
-var createOpenAiExecutionTools = Object.assign(function (options) {
-    // TODO: !!!!!! If browser, auto add `dangerouslyAllowBrowser`
-    return new OpenAiExecutionTools(options);
-}, {
-    packageName: '@promptbook/openai',
-    className: 'OpenAiExecutionTools',
-});
-/**
- * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
- * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
- */
-
-/**
- * @@@
- *
- * TODO: !!!!!! Remove EXECUTION_TOOLS_CLASSES and use $llmToolsRegister instead
- *
- * @private internal type for `createLlmToolsFromConfiguration`
- */
-var EXECUTION_TOOLS_CLASSES = {
-    createOpenAiExecutionTools: createOpenAiExecutionTools,
-    createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
-    createAzureOpenAiExecutionTools: function (options) {
-        return new AzureOpenAiExecutionTools(
-        // <- TODO: [🧱] Implement in a functional (not new Class) way
-        options);
-    },
-    // <- Note: [🦑] Add here new LLM provider
-};
-/**
- * TODO: !!!!!!! Make global register for this
- * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
- */
-
-/**
- * @@@
- *
- * Note: This function is not cached, every call creates new instance of `MultipleLlmExecutionTools`
- *
- * @returns @@@
- * @public exported from `@promptbook/core`
- */
-function createLlmToolsFromConfiguration(configuration, options) {
-    if (options === void 0) { options = {}; }
-    var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
-    var llmTools = configuration.map(function (llmConfiguration) {
-        var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
-        if (!constructor) {
-            throw new Error(spaceTrim(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
+        if (registeredItem === undefined) {
+            throw new Error(spaceTrim(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "` from `").concat(llmConfiguration.packageName, "`\n\n You have probably forgotten install and import the provider package.\n To fix this issue, you can:\n\n Install:\n\n > npm install ").concat(llmConfiguration.packageName, "\n\n And import:\n\n > import '").concat(llmConfiguration.packageName, "';\n\n\n ").concat(block($registeredLlmToolsMessage()), "\n "); }));
         }
-        return
+        return registeredItem(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
     });
     return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
 }
@@ -8110,7 +6611,7 @@ function createLlmToolsFromEnv(options) {
     var configuration = createLlmToolsFromConfigurationFromEnv();
    if (configuration.length === 0) {
        // TODO: [🥃]
-        throw new Error(spaceTrim("\n
+        throw new Error(spaceTrim(function (block) { return "\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n\n ".concat(block($registeredLlmToolsMessage()), "}\n "); }));
     }
     return createLlmToolsFromConfiguration(configuration, options);
 }
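Note on the hunk above: the new bundle resolves providers through the `$llmToolsRegister` lookup (`registeredItem`) instead of the removed hard-coded `EXECUTION_TOOLS_CLASSES` map, so a provider package has to be installed and imported (so it can register itself) before `createLlmToolsFromConfiguration` can construct it. A minimal consumer-side sketch of that flow; the configuration shape and package names come from this diff, while the `apiKey` option and everything else here is illustrative, not a definitive API:

    // Hypothetical usage sketch -- not part of the published bundle.
    // Importing the provider package lets it register itself, per the new error message above.
    import '@promptbook/openai';
    import { createLlmToolsFromConfiguration } from '@promptbook/core';

    const llmTools = createLlmToolsFromConfiguration(
        [
            {
                title: 'OpenAI',
                packageName: '@promptbook/openai',
                className: 'OpenAiExecutionTools',
                options: { apiKey: process.env.OPENAI_API_KEY }, // <- assumption: standard OpenAI client option
            },
        ],
        { isVerbose: true },
    );
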