@promptbook/remote-server 0.66.0-5 → 0.66.0-7

This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (22)
  1. package/esm/index.es.js +270 -157
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/cli.index.d.ts +6 -2
  4. package/esm/typings/src/cli/main.d.ts +2 -2
  5. package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -0
  6. package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.test.d.ts +1 -1
  7. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +1 -1
  8. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.test.d.ts +1 -1
  9. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -0
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +10 -5
  11. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -0
  12. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +10 -5
  13. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +8 -4
  14. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +8 -4
  15. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -5
  16. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +10 -5
  17. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +8 -4
  19. package/esm/typings/src/personas/preparePersona.test.d.ts +1 -1
  20. package/package.json +2 -2
  21. package/umd/index.umd.js +270 -157
  22. package/umd/index.umd.js.map +1 -1
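Note: The substantive change is the same in both bundles (esm/index.es.js and umd/index.umd.js carry identical +270/-157 edits) and is mirrored in the regenerated typings: every LlmExecutionTools implementation now exposes checkConfiguration(), constructs its API client lazily on first use, and has listModels() hoisted above the model-call methods. Short notes with TypeScript sketches of each pattern follow the relevant hunks below.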
package/esm/index.es.js CHANGED
@@ -11,7 +11,7 @@ import OpenAI from 'openai';
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.66.0-4';
+ var PROMPTBOOK_VERSION = '0.66.0-6';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /*! *****************************************************************************
@@ -198,6 +198,60 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ /**
+ * Check the configuration of all execution tools
+ */
+ MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/];
+ });
+ });
+ };
+ /**
+ * List all available models that can be used
+ * This lists is a combination of all available models from all execution tools
+ */
+ MultipleLlmExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
+ var e_1, _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ availableModels = [];
+ _d.label = 1;
+ case 1:
+ _d.trys.push([1, 6, 7, 8]);
+ _a = __values(this.llmExecutionTools), _b = _a.next();
+ _d.label = 2;
+ case 2:
+ if (!!_b.done) return [3 /*break*/, 5];
+ llmExecutionTools = _b.value;
+ return [4 /*yield*/, llmExecutionTools.listModels()];
+ case 3:
+ models = _d.sent();
+ availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
+ _d.label = 4;
+ case 4:
+ _b = _a.next();
+ return [3 /*break*/, 2];
+ case 5: return [3 /*break*/, 8];
+ case 6:
+ e_1_1 = _d.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 8];
+ case 7:
+ try {
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 8: return [2 /*return*/, availableModels];
+ }
+ });
+ });
+ };
  /**
  * Calls the best available chat model
  */
@@ -224,8 +278,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  */
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var errors, _a, _b, llmExecutionTools, _c, error_1, e_1_1;
- var e_1, _d;
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
+ var e_2, _d;
  var _this = this;
  return __generator(this, function (_e) {
  switch (_e.label) {
@@ -281,14 +335,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  return [3 /*break*/, 2];
  case 14: return [3 /*break*/, 17];
  case 15:
- e_1_1 = _e.sent();
- e_1 = { error: e_1_1 };
+ e_2_1 = _e.sent();
+ e_2 = { error: e_2_1 };
  return [3 /*break*/, 17];
  case 16:
  try {
  if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  return [7 /*endfinally*/];
  case 17:
  if (errors.length === 1) {
@@ -316,50 +370,6 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  });
  });
  };
- /**
- * List all available models that can be used
- * This lists is a combination of all available models from all execution tools
- */
- MultipleLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
- var e_2, _c;
- return __generator(this, function (_d) {
- switch (_d.label) {
- case 0:
- availableModels = [];
- _d.label = 1;
- case 1:
- _d.trys.push([1, 6, 7, 8]);
- _a = __values(this.llmExecutionTools), _b = _a.next();
- _d.label = 2;
- case 2:
- if (!!_b.done) return [3 /*break*/, 5];
- llmExecutionTools = _b.value;
- return [4 /*yield*/, llmExecutionTools.listModels()];
- case 3:
- models = _d.sent();
- availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
- _d.label = 4;
- case 4:
- _b = _a.next();
- return [3 /*break*/, 2];
- case 5: return [3 /*break*/, 8];
- case 6:
- e_2_1 = _d.sent();
- e_2 = { error: e_2_1 };
- return [3 /*break*/, 8];
- case 7:
- try {
- if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
- }
- finally { if (e_2) throw e_2.error; }
- return [7 /*endfinally*/];
- case 8: return [2 /*return*/, availableModels];
- }
- });
- });
- };
  return MultipleLlmExecutionTools;
  }());
  /**
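Note: The hunks above add checkConfiguration() to MultipleLlmExecutionTools (currently a no-op) and move listModels() from the bottom of the class to the top; the e_1/e_2 renames in callCommonModel are just the downlevel iteration helpers being renumbered after the reorder, not a behavioral change. De-sugared from the ES5 output, the moved method corresponds roughly to the following TypeScript; AvailableModel and the LlmExecutionTools interface are simplified stand-ins for the package's real types, not its exact signatures.

// Rough TypeScript equivalent of the compiled listModels() above
interface AvailableModel {
    modelTitle: string;
    modelName: string;
    modelVariant: 'CHAT' | 'COMPLETION' | 'EMBEDDING';
}

interface LlmExecutionTools {
    checkConfiguration(): Promise<void>;
    listModels(): Promise<ReadonlyArray<AvailableModel>> | ReadonlyArray<AvailableModel>;
}

class MultipleLlmExecutionTools implements LlmExecutionTools {
    public constructor(private readonly llmExecutionTools: ReadonlyArray<LlmExecutionTools>) {}

    // New in this release; async so the interface stays uniform across providers
    public async checkConfiguration(): Promise<void> {
        return;
    }

    // Concatenates the models offered by every wrapped tool, in order
    public async listModels(): Promise<ReadonlyArray<AvailableModel>> {
        const availableModels: Array<AvailableModel> = [];
        for (const tools of this.llmExecutionTools) {
            const models = await tools.listModels();
            availableModels.push(...models);
        }
        return availableModels;
    }
}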
@@ -447,6 +457,29 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ /**
+ * Check the configuration of all execution tools
+ */
+ RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/];
+ });
+ });
+ };
+ /**
+ * List all available models that can be used
+ */
+ RemoteLlmExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, (this.options.models ||
+ [
+ /* !!!!!! */
+ ])];
+ });
+ });
+ };
  /**
  * Creates a connection to the remote proxy server.
  */
@@ -541,19 +574,6 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
  });
  });
  };
- /**
- * List all available models that can be used
- */
- RemoteLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/, (this.options.models ||
- [
- /* !!! */
- ])];
- });
- });
- };
  return RemoteLlmExecutionTools;
  }());
  /**
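Note: RemoteLlmExecutionTools gets the same treatment: a stub checkConfiguration() and a listModels() relocated above the transport code, still returning the statically configured list rather than querying the server (the /* !!!!!! */ placeholder marks that open TODO). Using the AvailableModel shape sketched earlier, the relocated method amounts to this sketch, with the options type reduced to the one field it touches:

class RemoteLlmExecutionTools {
    public constructor(private readonly options: { models?: ReadonlyArray<AvailableModel> }) {}

    public async listModels(): Promise<ReadonlyArray<AvailableModel>> {
        // The compiled code falls back (via ||) to an empty placeholder array;
        // no round-trip to the remote server happens yet
        return this.options.models ?? [];
    }
}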
@@ -1260,12 +1280,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  function AnthropicClaudeExecutionTools(options) {
  if (options === void 0) { options = { isProxied: false }; }
  this.options = options;
- // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
- var anthropicOptions = __assign({}, options);
- delete anthropicOptions.isVerbose;
- delete anthropicOptions.isProxied;
- this.client = new Anthropic(anthropicOptions);
- // <- TODO: !!!!!! Lazy-load client
+ /**
+ * Anthropic Claude API client.
+ */
+ this.client = null;
  }
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
  get: function () {
@@ -1281,12 +1299,47 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ AnthropicClaudeExecutionTools.prototype.getClient = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var anthropicOptions;
+ return __generator(this, function (_a) {
+ if (this.client === null) {
+ anthropicOptions = __assign({}, this.options);
+ delete anthropicOptions.isVerbose;
+ delete anthropicOptions.isProxied;
+ this.client = new Anthropic(anthropicOptions);
+ }
+ return [2 /*return*/, this.client];
+ });
+ });
+ };
+ /**
+ * Check the `options` passed to `constructor`
+ */
+ AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.getClient()];
+ case 1:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ };
+ /**
+ * List all available Anthropic Claude models that can be used
+ */
+ AnthropicClaudeExecutionTools.prototype.listModels = function () {
+ return ANTHROPIC_CLAUDE_MODELS;
+ };
  /**
  * Calls Anthropic Claude API to use a chat model.
  */
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
+ var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -1294,6 +1347,9 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  console.info('💬 Anthropic Claude callChatModel call');
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -1320,8 +1376,8 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.messages.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.messages.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -1452,13 +1508,6 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
  return this.getDefaultModel('claude-3-opus');
  };
- // <- Note: [🤖] getDefaultXxxModel
- /**
- * List all available Anthropic Claude models that can be used
- */
- AnthropicClaudeExecutionTools.prototype.listModels = function () {
- return ANTHROPIC_CLAUDE_MODELS;
- };
  return AnthropicClaudeExecutionTools;
  }());
  /**
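Note: This is the core pattern of the release, applied here to Anthropic: the constructor no longer instantiates the SDK client eagerly; this.client starts as null, and getClient() builds it on first use, which is also what checkConfiguration() relies on to surface malformed options without making a network request. A minimal TypeScript sketch, assuming the option names visible in the diff:

import Anthropic from '@anthropic-ai/sdk';

type AnthropicClaudeOptions = {
    apiKey?: string;
    isVerbose?: boolean;
    isProxied?: boolean;
};

class AnthropicClaudeExecutionTools {
    private client: Anthropic | null = null;

    public constructor(private readonly options: AnthropicClaudeOptions = { isProxied: false }) {}

    private async getClient(): Promise<Anthropic> {
        if (this.client === null) {
            // Strip the Promptbook-only flags before handing options to the SDK
            const { isVerbose, isProxied, ...anthropicOptions } = this.options;
            this.client = new Anthropic(anthropicOptions);
        }
        return this.client;
    }

    public async checkConfiguration(): Promise<void> {
        // Constructing the client is the whole check: the SDK validates its
        // options (e.g. a missing API key) without making a network call
        await this.getClient();
    }
}

One consequence worth noting: a configuration error that used to throw in the constructor now throws on the first call to getClient(), i.e. inside checkConfiguration() or the first model call.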
@@ -1865,10 +1914,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  */
  function AzureOpenAiExecutionTools(options) {
  this.options = options;
- this.client = new OpenAIClient(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
- // <- TODO: !!!!!! Lazy-load client
+ /**
+ * OpenAI Azure API client.
+ */
+ this.client = null;
  }
  Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
  get: function () {
@@ -1884,28 +1933,74 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ AzureOpenAiExecutionTools.prototype.getClient = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ if (this.client === null) {
+ this.client = new OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(this.options.apiKey));
+ }
+ return [2 /*return*/, this.client];
+ });
+ });
+ };
+ /**
+ * Check the `options` passed to `constructor`
+ */
+ AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.getClient()];
+ case 1:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ };
+ /**
+ * List all available Azure OpenAI models that can be used
+ */
+ AzureOpenAiExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ // TODO: !!! Do here some filtering which models are really available as deployment
+ // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
+ return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
+ var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
+ return ({
+ modelTitle: "Azure ".concat(modelTitle),
+ modelName: modelName,
+ modelVariant: modelVariant,
+ });
+ })];
+ });
+ });
+ };
  /**
  * Calls OpenAI API to use a chat model.
  */
  AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
  var _a, _b;
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
- var _c;
- return __generator(this, function (_d) {
- switch (_d.label) {
+ var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
+ return __generator(this, function (_c) {
+ switch (_c.label) {
  case 0:
  if (this.options.isVerbose) {
  console.info('💬 OpenAI callChatModel call');
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _c.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
- _d.label = 1;
- case 1:
- _d.trys.push([1, 3, , 4]);
+ _c.label = 2;
+ case 2:
+ _c.trys.push([2, 4, , 5]);
  modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  modelSettings = {
  maxTokens: modelRequirements.maxTokens,
@@ -1935,9 +2030,9 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
  }
  rawRequest = [modelName, messages, modelSettings];
- return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
- case 2:
- rawResponse = _d.sent();
+ return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
+ case 3:
+ rawResponse = _c.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
@@ -1972,10 +2067,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  rawResponse: rawResponse,
  // <- [🗯]
  }];
- case 3:
- error_1 = _d.sent();
+ case 4:
+ error_1 = _c.sent();
  throw this.transformAzureError(error_1);
- case 4: return [2 /*return*/];
+ case 5: return [2 /*return*/];
  }
  });
  });
@@ -1986,22 +2081,24 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
  var _a, _b;
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
- var _c;
- return __generator(this, function (_d) {
- switch (_d.label) {
+ var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
+ return __generator(this, function (_c) {
+ switch (_c.label) {
  case 0:
  if (this.options.isVerbose) {
  console.info('🖋 OpenAI callCompletionModel call');
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _c.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'COMPLETION') {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
  }
- _d.label = 1;
- case 1:
- _d.trys.push([1, 3, , 4]);
+ _c.label = 2;
+ case 2:
+ _c.trys.push([2, 4, , 5]);
  modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  modelSettings = {
  maxTokens: modelRequirements.maxTokens || 2000,
@@ -2023,9 +2120,9 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  [rawPromptContent],
  modelSettings,
  ];
- return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
- case 2:
- rawResponse = _d.sent();
+ return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
+ case 3:
+ rawResponse = _c.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
@@ -2057,10 +2154,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  rawResponse: rawResponse,
  // <- [🗯]
  }];
- case 3:
- error_2 = _d.sent();
+ case 4:
+ error_2 = _c.sent();
  throw this.transformAzureError(error_2);
- case 4: return [2 /*return*/];
+ case 5: return [2 /*return*/];
  }
  });
  });
@@ -2076,25 +2173,6 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  var code = azureError.code, message = azureError.message;
  return new PipelineExecutionError("".concat(code, ": ").concat(message));
  };
- /**
- * List all available Azure OpenAI models that can be used
- */
- AzureOpenAiExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- // TODO: !!! Do here some filtering which models are really available as deployment
- // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
- return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
- var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
- return ({
- modelTitle: "Azure ".concat(modelTitle),
- modelName: modelName,
- modelVariant: modelVariant,
- });
- })];
- });
- });
- };
  return AzureOpenAiExecutionTools;
  }());
  /**
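Note: Azure follows the same lazy-client pattern, with one caveat the TODO in the hunk above spells out: listModels() simply re-titles the shared OpenAI catalogue and does not yet check which models exist as deployments on the configured resource, and checkConfiguration() only constructs the client, so it validates the shape of the options rather than the credentials themselves. The mapping, using the AvailableModel shape sketched earlier:

// Sketch of the relocated Azure listModels(); purely a re-titling pass
function listAzureModels(openAiModels: ReadonlyArray<AvailableModel>): Array<AvailableModel> {
    return openAiModels.map(({ modelTitle, modelName, modelVariant }) => ({
        modelTitle: `Azure ${modelTitle}`,
        modelName,
        modelVariant,
    }));
}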
@@ -2156,12 +2234,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  function OpenAiExecutionTools(options) {
  if (options === void 0) { options = {}; }
  this.options = options;
- // Note: Passing only OpenAI relevant options to OpenAI constructor
- var openAiOptions = __assign({}, options);
- delete openAiOptions.isVerbose;
- delete openAiOptions.user;
- this.client = new OpenAI(__assign({}, openAiOptions));
- // <- TODO: !!!!!! Lazy-load client
+ /**
+ * OpenAI API client.
+ */
+ this.client = null;
  }
  Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
  get: function () {
@@ -2177,12 +2253,54 @@ var OpenAiExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ OpenAiExecutionTools.prototype.getClient = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var openAiOptions;
+ return __generator(this, function (_a) {
+ if (this.client === null) {
+ openAiOptions = __assign({}, this.options);
+ delete openAiOptions.isVerbose;
+ delete openAiOptions.user;
+ this.client = new OpenAI(__assign({}, openAiOptions));
+ }
+ return [2 /*return*/, this.client];
+ });
+ });
+ };
+ /**
+ * Check the `options` passed to `constructor`
+ */
+ OpenAiExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.getClient()];
+ case 1:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ };
+ /**
+ * List all available OpenAI models that can be used
+ */
+ OpenAiExecutionTools.prototype.listModels = function () {
+ /*
+ Note: Dynamic lising of the models
+ const models = await this.openai.models.list({});
+
+ console.log({ models });
+ console.log(models.data);
+ */
+ return OPENAI_MODELS;
+ };
  /**
  * Calls OpenAI API to use a chat model.
  */
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -2190,6 +2308,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  console.info('💬 OpenAI callChatModel call', { prompt: prompt });
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -2226,8 +2347,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.chat.completions.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.chat.completions.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -2268,7 +2389,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -2276,6 +2397,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'COMPLETION') {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
@@ -2295,8 +2419,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.completions.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.completions.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -2334,7 +2458,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -2342,6 +2466,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  console.info('🖋 OpenAI embedding call', { prompt: prompt });
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'EMBEDDING') {
  throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
@@ -2356,8 +2483,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.embeddings.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -2423,20 +2550,6 @@ var OpenAiExecutionTools = /** @class */ (function () {
  OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
  return this.getDefaultModel('text-embedding-3-large');
  };
- // <- Note: [🤖] getDefaultXxxModel
- /**
- * List all available OpenAI models that can be used
- */
- OpenAiExecutionTools.prototype.listModels = function () {
- /*
- Note: Dynamic lising of the models
- const models = await this.openai.models.list({});
-
- console.log({ models });
- console.log(models.data);
- */
- return OPENAI_MODELS;
- };
  return OpenAiExecutionTools;
  }());
  /**
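Note: OpenAiExecutionTools rounds out the set with the same getClient()/checkConfiguration() pair and a listModels() that still returns the static OPENAI_MODELS catalogue (the commented-out block keeps the dynamic models.list() idea as a note). The practical payoff across all providers is that a caller can validate its setup up front instead of failing inside the first model call; a hypothetical caller-side flow, assuming the constructor options shown in the diff:

// checkConfiguration() forces lazy client construction, so a bad
// configuration fails here rather than mid-pipeline
const tools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });

await tools.checkConfiguration();
const models = await tools.listModels(); // static OPENAI_MODELS catalogue for now
console.info('models available:', models.length);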