@promptbook/remote-server 0.66.0-5 → 0.66.0-7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +270 -157
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/cli.index.d.ts +6 -2
- package/esm/typings/src/cli/main.d.ts +2 -2
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -0
- package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.test.d.ts +1 -1
- package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +1 -1
- package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.test.d.ts +1 -1
- package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -0
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +10 -5
- package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -0
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +10 -5
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +8 -4
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +8 -4
- package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -5
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +10 -5
- package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +1 -0
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +8 -4
- package/esm/typings/src/personas/preparePersona.test.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +270 -157
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED
@@ -16,7 +16,7 @@
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.66.0-
+var PROMPTBOOK_VERSION = '0.66.0-6';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /*! *****************************************************************************
@@ -203,6 +203,60 @@
         enumerable: false,
         configurable: true
     });
+    /**
+     * Check the configuration of all execution tools
+     */
+    MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                return [2 /*return*/];
+            });
+        });
+    };
+    /**
+     * List all available models that can be used
+     * This lists is a combination of all available models from all execution tools
+     */
+    MultipleLlmExecutionTools.prototype.listModels = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
+            var e_1, _c;
+            return __generator(this, function (_d) {
+                switch (_d.label) {
+                    case 0:
+                        availableModels = [];
+                        _d.label = 1;
+                    case 1:
+                        _d.trys.push([1, 6, 7, 8]);
+                        _a = __values(this.llmExecutionTools), _b = _a.next();
+                        _d.label = 2;
+                    case 2:
+                        if (!!_b.done) return [3 /*break*/, 5];
+                        llmExecutionTools = _b.value;
+                        return [4 /*yield*/, llmExecutionTools.listModels()];
+                    case 3:
+                        models = _d.sent();
+                        availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
+                        _d.label = 4;
+                    case 4:
+                        _b = _a.next();
+                        return [3 /*break*/, 2];
+                    case 5: return [3 /*break*/, 8];
+                    case 6:
+                        e_1_1 = _d.sent();
+                        e_1 = { error: e_1_1 };
+                        return [3 /*break*/, 8];
+                    case 7:
+                        try {
+                            if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+                        }
+                        finally { if (e_1) throw e_1.error; }
+                        return [7 /*endfinally*/];
+                    case 8: return [2 /*return*/, availableModels];
+                }
+            });
+        });
+    };
     /**
      * Calls the best available chat model
      */
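Note: the `listModels` body added above is tslib's ES5 down-leveling of a plain async for-of loop. A minimal TypeScript sketch of the equivalent source shape (the `AvailableModel` fields are inferred from the model objects elsewhere in this diff, not copied from the package's typings):

    interface AvailableModel {
        modelTitle: string;
        modelName: string;
        modelVariant: string;
    }

    interface LlmTools {
        // Some tools return the list synchronously, others asynchronously
        listModels(): Promise<Array<AvailableModel>> | Array<AvailableModel>;
    }

    // Combine the models of every wrapped execution tool into one list
    async function listModels(llmExecutionTools: Array<LlmTools>): Promise<Array<AvailableModel>> {
        const availableModels: Array<AvailableModel> = [];
        for (const tools of llmExecutionTools) {
            const models = await tools.listModels();
            availableModels.push(...models);
        }
        return availableModels;
    }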
@@ -229,8 +283,8 @@
      */
     MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var errors, _a, _b, llmExecutionTools, _c, error_1, e_1_1;
-            var e_1, _d;
+            var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
+            var e_2, _d;
             var _this = this;
             return __generator(this, function (_e) {
                 switch (_e.label) {
@@ -286,14 +340,14 @@
                         return [3 /*break*/, 2];
                     case 14: return [3 /*break*/, 17];
                     case 15:
-                        e_1_1 = _e.sent();
-                        e_1 = { error: e_1_1 };
+                        e_2_1 = _e.sent();
+                        e_2 = { error: e_2_1 };
                         return [3 /*break*/, 17];
                     case 16:
                         try {
                             if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
                         }
-                        finally { if (e_1) throw e_1.error; }
+                        finally { if (e_2) throw e_2.error; }
                         return [7 /*endfinally*/];
                     case 17:
                         if (errors.length === 1) {
@@ -321,50 +375,6 @@
             });
         });
     };
-    /**
-     * List all available models that can be used
-     * This lists is a combination of all available models from all execution tools
-     */
-    MultipleLlmExecutionTools.prototype.listModels = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
-            var e_2, _c;
-            return __generator(this, function (_d) {
-                switch (_d.label) {
-                    case 0:
-                        availableModels = [];
-                        _d.label = 1;
-                    case 1:
-                        _d.trys.push([1, 6, 7, 8]);
-                        _a = __values(this.llmExecutionTools), _b = _a.next();
-                        _d.label = 2;
-                    case 2:
-                        if (!!_b.done) return [3 /*break*/, 5];
-                        llmExecutionTools = _b.value;
-                        return [4 /*yield*/, llmExecutionTools.listModels()];
-                    case 3:
-                        models = _d.sent();
-                        availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
-                        _d.label = 4;
-                    case 4:
-                        _b = _a.next();
-                        return [3 /*break*/, 2];
-                    case 5: return [3 /*break*/, 8];
-                    case 6:
-                        e_2_1 = _d.sent();
-                        e_2 = { error: e_2_1 };
-                        return [3 /*break*/, 8];
-                    case 7:
-                        try {
-                            if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
-                        }
-                        finally { if (e_2) throw e_2.error; }
-                        return [7 /*endfinally*/];
-                    case 8: return [2 /*return*/, availableModels];
-                }
-            });
-        });
-    };
     return MultipleLlmExecutionTools;
 }());
 /**
@@ -452,6 +462,29 @@
         enumerable: false,
         configurable: true
     });
+    /**
+     * Check the configuration of all execution tools
+     */
+    RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                return [2 /*return*/];
+            });
+        });
+    };
+    /**
+     * List all available models that can be used
+     */
+    RemoteLlmExecutionTools.prototype.listModels = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                return [2 /*return*/, (this.options.models ||
+                        [
+                        /* !!!!!! */
+                        ])];
+            });
+        });
+    };
     /**
      * Creates a connection to the remote proxy server.
      */
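Note: per the typings summary at the top (`src/execution/LlmExecutionTools.d.ts +1 -0`), `checkConfiguration` is a new member of the shared tools interface; for `MultipleLlmExecutionTools` and `RemoteLlmExecutionTools` it is currently a no-op. A hedged TypeScript sketch of the interface shape (only `checkConfiguration` and `listModels` are evidenced by this diff; other members are omitted):

    interface AvailableModel {
        modelTitle: string;
        modelName: string;
        modelVariant: string;
    }

    interface LlmExecutionTools {
        // New in this release: resolves once the tool's configuration is usable
        checkConfiguration(): Promise<void>;
        // The union mirrors the sync and async implementations seen in this diff
        listModels(): Promise<Array<AvailableModel>> | Array<AvailableModel>;
    }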
@@ -546,19 +579,6 @@
             });
         });
     };
-    /**
-     * List all available models that can be used
-     */
-    RemoteLlmExecutionTools.prototype.listModels = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                return [2 /*return*/, (this.options.models ||
-                        [
-                        /* !!! */
-                        ])];
-            });
-        });
-    };
     return RemoteLlmExecutionTools;
 }());
 /**
@@ -1265,12 +1285,10 @@
     function AnthropicClaudeExecutionTools(options) {
         if (options === void 0) { options = { isProxied: false }; }
         this.options = options;
-
-
-
-
-        this.client = new Anthropic__default["default"](anthropicOptions);
-        // <- TODO: !!!!!! Lazy-load client
+        /**
+         * Anthropic Claude API client.
+         */
+        this.client = null;
     }
     Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
         get: function () {
@@ -1286,12 +1304,47 @@
         enumerable: false,
         configurable: true
     });
+    AnthropicClaudeExecutionTools.prototype.getClient = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            var anthropicOptions;
+            return __generator(this, function (_a) {
+                if (this.client === null) {
+                    anthropicOptions = __assign({}, this.options);
+                    delete anthropicOptions.isVerbose;
+                    delete anthropicOptions.isProxied;
+                    this.client = new Anthropic__default["default"](anthropicOptions);
+                }
+                return [2 /*return*/, this.client];
+            });
+        });
+    };
+    /**
+     * Check the `options` passed to `constructor`
+     */
+    AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                switch (_a.label) {
+                    case 0: return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        _a.sent();
+                        return [2 /*return*/];
+                }
+            });
+        });
+    };
+    /**
+     * List all available Anthropic Claude models that can be used
+     */
+    AnthropicClaudeExecutionTools.prototype.listModels = function () {
+        return ANTHROPIC_CLAUDE_MODELS;
+    };
     /**
      * Calls Anthropic Claude API to use a chat model.
      */
     AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
+            var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
@@ -1299,6 +1352,9 @@
                             console.info('💬 Anthropic Claude callChatModel call');
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _a.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'CHAT') {
                             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -1325,8 +1381,8 @@
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
                         }
-                        return [4 /*yield*/, this.client.messages.create(rawRequest)];
-                    case 1:
+                        return [4 /*yield*/, client.messages.create(rawRequest)];
+                    case 2:
                         rawResponse = _a.sent();
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
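Note: the recurring change across the provider classes (here, and for Azure OpenAI and OpenAI below) is the same lazy-initialization move: the SDK client is no longer constructed in the constructor (`this.client = null;`) but memoized in a new `getClient()`, and the new `checkConfiguration()` simply forces that first construction. A TypeScript sketch of the pattern as it appears for Anthropic (class and option names follow the transpiled code above; this is a sketch, not the package's exact source):

    import Anthropic from '@anthropic-ai/sdk';

    type Options = { apiKey?: string; isVerbose?: boolean; isProxied?: boolean };

    class LazyAnthropicTools {
        private client: Anthropic | null = null;

        public constructor(private readonly options: Options = { isProxied: false }) {}

        private async getClient(): Promise<Anthropic> {
            if (this.client === null) {
                // Strip promptbook-only flags before handing the options to the SDK
                const anthropicOptions = { ...this.options };
                delete anthropicOptions.isVerbose;
                delete anthropicOptions.isProxied;
                this.client = new Anthropic(anthropicOptions);
            }
            return this.client;
        }

        // Constructing the client is what currently validates the options
        public async checkConfiguration(): Promise<void> {
            await this.getClient();
        }
    }

Each call* method then begins with the equivalent of `const client = await this.getClient();` instead of touching `this.client` directly, which is exactly the `client = _a.sent();` shift and the case renumbering visible in the hunks above.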
@@ -1457,13 +1513,6 @@
     AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
         return this.getDefaultModel('claude-3-opus');
     };
-    // <- Note: [🤖] getDefaultXxxModel
-    /**
-     * List all available Anthropic Claude models that can be used
-     */
-    AnthropicClaudeExecutionTools.prototype.listModels = function () {
-        return ANTHROPIC_CLAUDE_MODELS;
-    };
     return AnthropicClaudeExecutionTools;
 }());
 /**
@@ -1870,10 +1919,10 @@
      */
     function AzureOpenAiExecutionTools(options) {
         this.options = options;
-
-
-
-
+        /**
+         * OpenAI Azure API client.
+         */
+        this.client = null;
     }
     Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
         get: function () {
@@ -1889,28 +1938,74 @@
         enumerable: false,
         configurable: true
     });
+    AzureOpenAiExecutionTools.prototype.getClient = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                if (this.client === null) {
+                    this.client = new openai.OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new openai.AzureKeyCredential(this.options.apiKey));
+                }
+                return [2 /*return*/, this.client];
+            });
+        });
+    };
+    /**
+     * Check the `options` passed to `constructor`
+     */
+    AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                switch (_a.label) {
+                    case 0: return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        _a.sent();
+                        return [2 /*return*/];
+                }
+            });
+        });
+    };
+    /**
+     * List all available Azure OpenAI models that can be used
+     */
+    AzureOpenAiExecutionTools.prototype.listModels = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                // TODO: !!! Do here some filtering which models are really available as deployment
+                // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
+                return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
+                        var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
+                        return ({
+                            modelTitle: "Azure ".concat(modelTitle),
+                            modelName: modelName,
+                            modelVariant: modelVariant,
+                        });
+                    })];
+            });
+        });
+    };
     /**
      * Calls OpenAI API to use a chat model.
      */
     AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
         var _a, _b;
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
-            var _c;
-            return __generator(this, function (_d) {
-                switch (_d.label) {
+            var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
+            return __generator(this, function (_c) {
+                switch (_c.label) {
                     case 0:
                         if (this.options.isVerbose) {
                             console.info('💬 OpenAI callChatModel call');
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _c.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'CHAT') {
                             throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
                         }
-                        _d.label = 1;
-                    case 1:
-                        _d.trys.push([1, 3, , 4]);
+                        _c.label = 2;
+                    case 2:
+                        _c.trys.push([2, 4, , 5]);
                         modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
                         modelSettings = {
                             maxTokens: modelRequirements.maxTokens,
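Note: the Azure `listModels` added above reuses the shared `OPENAI_MODELS` table and only rebrands the titles; the TODO carried over in the hunk notes that it does not yet filter down to the deployments actually present on the resource. The mapping in TypeScript form (the two `OPENAI_MODELS` rows here are placeholders, not the package's real table):

    interface AvailableModel {
        modelTitle: string;
        modelName: string;
        modelVariant: string;
    }

    // Placeholder rows; the real OPENAI_MODELS constant lives in the package
    const OPENAI_MODELS: Array<AvailableModel> = [
        { modelTitle: 'GPT-4', modelName: 'gpt-4', modelVariant: 'CHAT' },
        { modelTitle: 'DaVinci', modelName: 'text-davinci-003', modelVariant: 'COMPLETION' },
    ];

    async function listAzureModels(): Promise<Array<AvailableModel>> {
        // TODO (from the diff): filter to models really available as deployments
        return OPENAI_MODELS.map(({ modelTitle, modelName, modelVariant }) => ({
            modelTitle: `Azure ${modelTitle}`,
            modelName,
            modelVariant,
        }));
    }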
@@ -1940,9 +2035,9 @@
                             console.info(colors__default["default"].bgWhite('messages'), JSON.stringify(messages, null, 4));
                         }
                         rawRequest = [modelName, messages, modelSettings];
-                        return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
-                    case 2:
-                        rawResponse = _d.sent();
+                        return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
+                    case 3:
+                        rawResponse = _c.sent();
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
                         }
@@ -1977,10 +2072,10 @@
                         rawResponse: rawResponse,
                         // <- [🗯]
                     }];
-                    case 3:
-                        error_1 = _d.sent();
+                    case 4:
+                        error_1 = _c.sent();
                         throw this.transformAzureError(error_1);
-                    case 4: return [2 /*return*/];
+                    case 5: return [2 /*return*/];
                 }
             });
         });
@@ -1991,22 +2086,24 @@
     AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
         var _a, _b;
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
-            var _c;
-            return __generator(this, function (_d) {
-                switch (_d.label) {
+            var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
+            return __generator(this, function (_c) {
+                switch (_c.label) {
                     case 0:
                         if (this.options.isVerbose) {
                             console.info('🖋 OpenAI callCompletionModel call');
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _c.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'COMPLETION') {
                             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
                         }
-                        _d.label = 1;
-                    case 1:
-                        _d.trys.push([1, 3, , 4]);
+                        _c.label = 2;
+                    case 2:
+                        _c.trys.push([2, 4, , 5]);
                         modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
                         modelSettings = {
                             maxTokens: modelRequirements.maxTokens || 2000,
@@ -2028,9 +2125,9 @@
                             [rawPromptContent],
                             modelSettings,
                         ];
-                        return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
-                    case 2:
-                        rawResponse = _d.sent();
+                        return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
+                    case 3:
+                        rawResponse = _c.sent();
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
                         }
@@ -2062,10 +2159,10 @@
                         rawResponse: rawResponse,
                         // <- [🗯]
                     }];
-                    case 3:
-                        error_2 = _d.sent();
+                    case 4:
+                        error_2 = _c.sent();
                         throw this.transformAzureError(error_2);
-                    case 4: return [2 /*return*/];
+                    case 5: return [2 /*return*/];
                 }
             });
         });
@@ -2081,25 +2178,6 @@
         var code = azureError.code, message = azureError.message;
         return new PipelineExecutionError("".concat(code, ": ").concat(message));
     };
-    /**
-     * List all available Azure OpenAI models that can be used
-     */
-    AzureOpenAiExecutionTools.prototype.listModels = function () {
-        return __awaiter(this, void 0, void 0, function () {
-            return __generator(this, function (_a) {
-                // TODO: !!! Do here some filtering which models are really available as deployment
-                // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
-                return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
-                        var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
-                        return ({
-                            modelTitle: "Azure ".concat(modelTitle),
-                            modelName: modelName,
-                            modelVariant: modelVariant,
-                        });
-                    })];
-            });
-        });
-    };
     return AzureOpenAiExecutionTools;
 }());
 /**
@@ -2161,12 +2239,10 @@
     function OpenAiExecutionTools(options) {
         if (options === void 0) { options = {}; }
         this.options = options;
-
-
-
-
-        this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
-        // <- TODO: !!!!!! Lazy-load client
+        /**
+         * OpenAI API client.
+         */
+        this.client = null;
     }
     Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
         get: function () {
@@ -2182,12 +2258,54 @@
         enumerable: false,
         configurable: true
     });
+    OpenAiExecutionTools.prototype.getClient = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            var openAiOptions;
+            return __generator(this, function (_a) {
+                if (this.client === null) {
+                    openAiOptions = __assign({}, this.options);
+                    delete openAiOptions.isVerbose;
+                    delete openAiOptions.user;
+                    this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
+                }
+                return [2 /*return*/, this.client];
+            });
+        });
+    };
+    /**
+     * Check the `options` passed to `constructor`
+     */
+    OpenAiExecutionTools.prototype.checkConfiguration = function () {
+        return __awaiter(this, void 0, void 0, function () {
+            return __generator(this, function (_a) {
+                switch (_a.label) {
+                    case 0: return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        _a.sent();
+                        return [2 /*return*/];
+                }
+            });
+        });
+    };
+    /**
+     * List all available OpenAI models that can be used
+     */
+    OpenAiExecutionTools.prototype.listModels = function () {
+        /*
+        Note: Dynamic lising of the models
+        const models = await this.openai.models.list({});
+
+        console.log({ models });
+        console.log(models.data);
+        */
+        return OPENAI_MODELS;
+    };
     /**
      * Calls OpenAI API to use a chat model.
      */
     OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
|
|
|
2195
2313
|
console.info('💬 OpenAI callChatModel call', { prompt: prompt });
|
|
2196
2314
|
}
|
|
2197
2315
|
content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
|
|
2316
|
+
return [4 /*yield*/, this.getClient()];
|
|
2317
|
+
case 1:
|
|
2318
|
+
client = _a.sent();
|
|
2198
2319
|
// TODO: [☂] Use here more modelRequirements
|
|
2199
2320
|
if (modelRequirements.modelVariant !== 'CHAT') {
|
|
2200
2321
|
throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
|
|
@@ -2231,8 +2352,8 @@
|
|
|
2231
2352
|
if (this.options.isVerbose) {
|
|
2232
2353
|
console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
|
|
2233
2354
|
}
|
|
2234
|
-
return [4 /*yield*/,
|
|
2235
|
-
case
|
|
2355
|
+
return [4 /*yield*/, client.chat.completions.create(rawRequest)];
|
|
2356
|
+
case 2:
|
|
2236
2357
|
rawResponse = _a.sent();
|
|
2237
2358
|
if (this.options.isVerbose) {
|
|
2238
2359
|
console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
|
|
@@ -2273,7 +2394,7 @@
      */
     OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
@@ -2281,6 +2402,9 @@
                             console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _a.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'COMPLETION') {
                             throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
@@ -2300,8 +2424,8 @@
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
                         }
-                        return [4 /*yield*/, this.client.completions.create(rawRequest)];
-                    case 1:
+                        return [4 /*yield*/, client.completions.create(rawRequest)];
+                    case 2:
                         rawResponse = _a.sent();
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -2339,7 +2463,7 @@
      */
     OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
@@ -2347,6 +2471,9 @@
                             console.info('🖋 OpenAI embedding call', { prompt: prompt });
                         }
                         content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+                        return [4 /*yield*/, this.getClient()];
+                    case 1:
+                        client = _a.sent();
                         // TODO: [☂] Use here more modelRequirements
                         if (modelRequirements.modelVariant !== 'EMBEDDING') {
                             throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
@@ -2361,8 +2488,8 @@
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
                         }
-                        return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
-                    case 1:
+                        return [4 /*yield*/, client.embeddings.create(rawRequest)];
+                    case 2:
                         rawResponse = _a.sent();
                         if (this.options.isVerbose) {
                             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -2428,20 +2555,6 @@
     OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
         return this.getDefaultModel('text-embedding-3-large');
     };
-    // <- Note: [🤖] getDefaultXxxModel
-    /**
-     * List all available OpenAI models that can be used
-     */
-    OpenAiExecutionTools.prototype.listModels = function () {
-        /*
-        Note: Dynamic lising of the models
-        const models = await this.openai.models.list({});
-
-        console.log({ models });
-        console.log(models.data);
-        */
-        return OPENAI_MODELS;
-    };
     return OpenAiExecutionTools;
 }());
 /**