@promptbook/node 0.66.0-5 → 0.66.0-7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. package/esm/index.es.js +315 -165
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/cli.index.d.ts +6 -2
  4. package/esm/typings/src/cli/main.d.ts +2 -2
  5. package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -0
  6. package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.test.d.ts +1 -1
  7. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +1 -1
  8. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.test.d.ts +1 -1
  9. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -0
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +10 -5
  11. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +1 -0
  12. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +10 -5
  13. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +8 -4
  14. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +8 -4
  15. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -5
  16. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +10 -5
  17. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +1 -0
  18. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +8 -4
  19. package/esm/typings/src/personas/preparePersona.test.d.ts +1 -1
  20. package/package.json +2 -2
  21. package/umd/index.umd.js +315 -165
  22. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -17,7 +17,7 @@ import OpenAI from 'openai';
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.66.0-4';
+ var PROMPTBOOK_VERSION = '0.66.0-6';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /*! *****************************************************************************
@@ -696,7 +696,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-4",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-4",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-4",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-4",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-6",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1919,6 +1919,60 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ /**
+ * Check the configuration of all execution tools
+ */
+ MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/];
+ });
+ });
+ };
+ /**
+ * List all available models that can be used
+ * This lists is a combination of all available models from all execution tools
+ */
+ MultipleLlmExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
+ var e_1, _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ availableModels = [];
+ _d.label = 1;
+ case 1:
+ _d.trys.push([1, 6, 7, 8]);
+ _a = __values(this.llmExecutionTools), _b = _a.next();
+ _d.label = 2;
+ case 2:
+ if (!!_b.done) return [3 /*break*/, 5];
+ llmExecutionTools = _b.value;
+ return [4 /*yield*/, llmExecutionTools.listModels()];
+ case 3:
+ models = _d.sent();
+ availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
+ _d.label = 4;
+ case 4:
+ _b = _a.next();
+ return [3 /*break*/, 2];
+ case 5: return [3 /*break*/, 8];
+ case 6:
+ e_1_1 = _d.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 8];
+ case 7:
+ try {
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 8: return [2 /*return*/, availableModels];
+ }
+ });
+ });
+ };
  /**
  * Calls the best available chat model
  */
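The `__awaiter`/`__generator` state machine in the added `listModels` above is downleveled ES5. As a rough TypeScript reconstruction (a sketch, not the package's actual source; `AvailableModel` and `LlmExecutionTools` here are simplified stand-ins for the library's real types), it amounts to:

    interface AvailableModel {
        modelTitle: string;
        modelName: string;
    }

    interface LlmExecutionTools {
        listModels(): Promise<ReadonlyArray<AvailableModel>> | ReadonlyArray<AvailableModel>;
    }

    // Walk every wrapped execution tool in order and concatenate the model
    // lists, which is what the compiled loop + push.apply(...) does above.
    async function listModelsOfAll(
        llmExecutionToolsList: ReadonlyArray<LlmExecutionTools>,
    ): Promise<Array<AvailableModel>> {
        const availableModels: Array<AvailableModel> = [];
        for (const llmExecutionTools of llmExecutionToolsList) {
            const models = await llmExecutionTools.listModels();
            availableModels.push(...models);
        }
        return availableModels;
    }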
@@ -1945,8 +1999,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  */
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var errors, _a, _b, llmExecutionTools, _c, error_1, e_1_1;
- var e_1, _d;
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
+ var e_2, _d;
  var _this = this;
  return __generator(this, function (_e) {
  switch (_e.label) {
@@ -2002,14 +2056,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  return [3 /*break*/, 2];
  case 14: return [3 /*break*/, 17];
  case 15:
- e_1_1 = _e.sent();
- e_1 = { error: e_1_1 };
+ e_2_1 = _e.sent();
+ e_2 = { error: e_2_1 };
  return [3 /*break*/, 17];
  case 16:
  try {
  if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  return [7 /*endfinally*/];
  case 17:
  if (errors.length === 1) {
@@ -2037,50 +2091,6 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  });
  });
  };
- /**
- * List all available models that can be used
- * This lists is a combination of all available models from all execution tools
- */
- MultipleLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
- var e_2, _c;
- return __generator(this, function (_d) {
- switch (_d.label) {
- case 0:
- availableModels = [];
- _d.label = 1;
- case 1:
- _d.trys.push([1, 6, 7, 8]);
- _a = __values(this.llmExecutionTools), _b = _a.next();
- _d.label = 2;
- case 2:
- if (!!_b.done) return [3 /*break*/, 5];
- llmExecutionTools = _b.value;
- return [4 /*yield*/, llmExecutionTools.listModels()];
- case 3:
- models = _d.sent();
- availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
- _d.label = 4;
- case 4:
- _b = _a.next();
- return [3 /*break*/, 2];
- case 5: return [3 /*break*/, 8];
- case 6:
- e_2_1 = _d.sent();
- e_2 = { error: e_2_1 };
- return [3 /*break*/, 8];
- case 7:
- try {
- if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
- }
- finally { if (e_2) throw e_2.error; }
- return [7 /*endfinally*/];
- case 8: return [2 /*return*/, availableModels];
- }
- });
- });
- };
  return MultipleLlmExecutionTools;
  }());
  /**
@@ -3515,6 +3525,13 @@ function countTotalUsage(llmTools) {
  // TODO: [🧠] Maybe put here some suffix
  return llmTools.description;
  },
+ checkConfiguration: function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, /* not await */ llmTools.checkConfiguration()];
+ });
+ });
+ },
  listModels: function () {
  return /* not await */ llmTools.listModels();
  },
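This hunk extends the usage-counting wrapper built by `countTotalUsage`: the proxy object now forwards the new `checkConfiguration()` to the wrapped tools the same way it already forwards `listModels()`, returning the inner call directly ("not await"). A minimal TypeScript sketch of that delegation (simplified types, not the package's actual source):

    interface ConfigurableTools {
        checkConfiguration(): Promise<void>;
        listModels(): unknown;
    }

    // Each wrapper method returns the inner call unchanged, so rejections
    // and timings surface from the wrapped tools as-is.
    function wrapTools(llmTools: ConfigurableTools): ConfigurableTools {
        return {
            checkConfiguration: () => llmTools.checkConfiguration(),
            listModels: () => llmTools.listModels(),
        };
    }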
@@ -6286,6 +6303,20 @@ var EnvironmentMismatchError = /** @class */ (function (_super) {
  return EnvironmentMismatchError;
  }(Error));
 
+ /**
+ * @@@
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it access global
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function $getGlobalScope() {
+ return Function('return this')();
+ }
+ /***
+ * TODO: !!!!! Make private and promptbook registry from this
+ */
+
  /**
  * Register is @@@
  *
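`Function('return this')()` is the classic realm-independent way to reach the true global object: code created by the `Function` constructor runs as sloppy-mode script code, where `this` defaults to the global object, even when the calling bundle itself is strict-mode ESM. A tiny sketch of the same trick (on modern runtimes the built-in `globalThis` is the simpler equivalent):

    // Resolves the real global object in Node.js, browsers, and workers alike.
    function $getGlobalScope(): typeof globalThis {
        return Function('return this')();
    }

    console.log($getGlobalScope() === globalThis); // -> true on modern runtimes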
@@ -6300,13 +6331,30 @@ var Register = /** @class */ (function () {
  return this.storage;
  };
  Register.prototype.register = function (registered) {
- // !!!!!! <- TODO: What to return here
- // TODO: !!!!!! Compare if same is not already registered
- this.storage.push(registered);
+ // <- TODO: What to return here
+ var packageName = registered.packageName, className = registered.className;
+ var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
+ var existingRegistration = this.storage[existingRegistrationIndex];
+ if (existingRegistration) {
+ console.warn("!!!!!! Re-registering ".concat(packageName, ".").concat(className, " again"));
+ this.storage[existingRegistrationIndex] = registered;
+ }
+ else {
+ this.storage.push(registered);
+ }
  };
  return Register;
  }());
 
+ // TODO: !!!!!! Move this logic to Register and rename to $Register
+ var globalScope = $getGlobalScope();
+ if (globalScope.$llmToolsMetadataRegister === undefined) {
+ globalScope.$llmToolsMetadataRegister = [];
+ }
+ else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
+ throw new UnexpectedError("Expected $llmToolsMetadataRegister to be an array, but got ".concat(typeof globalScope.$llmToolsMetadataRegister));
+ }
+ var _ = globalScope.$llmToolsMetadataRegister;
  /**
  * @@@
  *
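The bootstrap above exists so that registrations survive duplicate copies of the bundle: instead of a per-module array, the register's backing storage lives under one well-known key on the global object, so two bundled copies of Promptbook see the same `$llmToolsMetadataRegister`. A TypeScript sketch of that pattern (names follow the diff; types are simplified):

    type Registration = { packageName: string; className: string };

    function getSharedRegisterStorage(): Array<Registration> {
        const globalScope = Function('return this')() as Record<string, unknown>;
        if (globalScope.$llmToolsMetadataRegister === undefined) {
            // First copy of the bundle to run creates the shared array...
            globalScope.$llmToolsMetadataRegister = [];
        } else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
            // ...and anything else squatting on the key is a hard error.
            throw new Error(
                `Expected $llmToolsMetadataRegister to be an array, but got ${typeof globalScope.$llmToolsMetadataRegister}`,
            );
        }
        return globalScope.$llmToolsMetadataRegister as Array<Registration>;
    }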
@@ -6314,9 +6362,8 @@ var Register = /** @class */ (function () {
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
- var $llmToolsMetadataRegister = new Register([
- // TODO: !!!!!! Take from global scope
- ]);
+ var $llmToolsMetadataRegister = new Register(_);
+ $getGlobalScope().$llmToolsMetadataRegister;
 
  /**
  * @@@
@@ -6379,6 +6426,29 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ /**
+ * Check the configuration of all execution tools
+ */
+ RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/];
+ });
+ });
+ };
+ /**
+ * List all available models that can be used
+ */
+ RemoteLlmExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, (this.options.models ||
+ [
+ /* !!!!!! */
+ ])];
+ });
+ });
+ };
  /**
  * Creates a connection to the remote proxy server.
  */
@@ -6473,19 +6543,6 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
  });
  });
  };
- /**
- * List all available models that can be used
- */
- RemoteLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/, (this.options.models ||
- [
- /* !!! */
- ])];
- });
- });
- };
  return RemoteLlmExecutionTools;
  }());
  /**
@@ -6682,12 +6739,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  function AnthropicClaudeExecutionTools(options) {
  if (options === void 0) { options = { isProxied: false }; }
  this.options = options;
- // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
- var anthropicOptions = __assign({}, options);
- delete anthropicOptions.isVerbose;
- delete anthropicOptions.isProxied;
- this.client = new Anthropic(anthropicOptions);
- // <- TODO: !!!!!! Lazy-load client
+ /**
+ * Anthropic Claude API client.
+ */
+ this.client = null;
  }
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
  get: function () {
@@ -6703,12 +6758,47 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ AnthropicClaudeExecutionTools.prototype.getClient = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var anthropicOptions;
+ return __generator(this, function (_a) {
+ if (this.client === null) {
+ anthropicOptions = __assign({}, this.options);
+ delete anthropicOptions.isVerbose;
+ delete anthropicOptions.isProxied;
+ this.client = new Anthropic(anthropicOptions);
+ }
+ return [2 /*return*/, this.client];
+ });
+ });
+ };
+ /**
+ * Check the `options` passed to `constructor`
+ */
+ AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.getClient()];
+ case 1:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ };
+ /**
+ * List all available Anthropic Claude models that can be used
+ */
+ AnthropicClaudeExecutionTools.prototype.listModels = function () {
+ return ANTHROPIC_CLAUDE_MODELS;
+ };
  /**
  * Calls Anthropic Claude API to use a chat model.
  */
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
+ var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -6716,6 +6806,9 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  console.info('💬 Anthropic Claude callChatModel call');
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -6742,8 +6835,8 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.messages.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.messages.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -6874,13 +6967,6 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
  return this.getDefaultModel('claude-3-opus');
  };
- // <- Note: [🤖] getDefaultXxxModel
- /**
- * List all available Anthropic Claude models that can be used
- */
- AnthropicClaudeExecutionTools.prototype.listModels = function () {
- return ANTHROPIC_CLAUDE_MODELS;
- };
  return AnthropicClaudeExecutionTools;
  }());
  /**
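The Anthropic hunks above implement the lazy-load TODO from the previous build: the constructor now only stores options, and the SDK client is built on first use inside `getClient()`, which the new `checkConfiguration()` simply awaits once. Reconstructed as a TypeScript sketch (not the package's actual source; the options type is simplified and `isProxied` handling is omitted):

    import Anthropic from '@anthropic-ai/sdk';

    class LazyAnthropicTools {
        private client: Anthropic | null = null;

        public constructor(private readonly options: { apiKey: string; isVerbose?: boolean }) {}

        // Constructing the tools no longer touches the SDK; the client is
        // created (and the options effectively validated) on first use.
        private async getClient(): Promise<Anthropic> {
            if (this.client === null) {
                const { isVerbose, ...anthropicOptions } = this.options;
                this.client = new Anthropic(anthropicOptions);
            }
            return this.client;
        }

        // checkConfiguration() just forces that first construction.
        public async checkConfiguration(): Promise<void> {
            await this.getClient();
        }
    }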
@@ -7287,10 +7373,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  */
  function AzureOpenAiExecutionTools(options) {
  this.options = options;
- this.client = new OpenAIClient(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
- // <- TODO: !!!!!! Lazy-load client
+ /**
+ * OpenAI Azure API client.
+ */
+ this.client = null;
  }
  Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
  get: function () {
@@ -7306,28 +7392,74 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ AzureOpenAiExecutionTools.prototype.getClient = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ if (this.client === null) {
+ this.client = new OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(this.options.apiKey));
+ }
+ return [2 /*return*/, this.client];
+ });
+ });
+ };
+ /**
+ * Check the `options` passed to `constructor`
+ */
+ AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.getClient()];
+ case 1:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ };
+ /**
+ * List all available Azure OpenAI models that can be used
+ */
+ AzureOpenAiExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ // TODO: !!! Do here some filtering which models are really available as deployment
+ // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
+ return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
+ var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
+ return ({
+ modelTitle: "Azure ".concat(modelTitle),
+ modelName: modelName,
+ modelVariant: modelVariant,
+ });
+ })];
+ });
+ });
+ };
  /**
  * Calls OpenAI API to use a chat model.
  */
  AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
  var _a, _b;
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
- var _c;
- return __generator(this, function (_d) {
- switch (_d.label) {
+ var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
+ return __generator(this, function (_c) {
+ switch (_c.label) {
  case 0:
  if (this.options.isVerbose) {
  console.info('💬 OpenAI callChatModel call');
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _c.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
- _d.label = 1;
- case 1:
- _d.trys.push([1, 3, , 4]);
+ _c.label = 2;
+ case 2:
+ _c.trys.push([2, 4, , 5]);
  modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  modelSettings = {
  maxTokens: modelRequirements.maxTokens,
@@ -7357,9 +7489,9 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
  }
  rawRequest = [modelName, messages, modelSettings];
- return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
- case 2:
- rawResponse = _d.sent();
+ return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
+ case 3:
+ rawResponse = _c.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
@@ -7394,10 +7526,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  rawResponse: rawResponse,
  // <- [🗯]
  }];
- case 3:
- error_1 = _d.sent();
+ case 4:
+ error_1 = _c.sent();
  throw this.transformAzureError(error_1);
- case 4: return [2 /*return*/];
+ case 5: return [2 /*return*/];
  }
  });
  });
@@ -7408,22 +7540,24 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
  var _a, _b;
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
- var _c;
- return __generator(this, function (_d) {
- switch (_d.label) {
+ var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
+ return __generator(this, function (_c) {
+ switch (_c.label) {
  case 0:
  if (this.options.isVerbose) {
  console.info('🖋 OpenAI callCompletionModel call');
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _c.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'COMPLETION') {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
  }
- _d.label = 1;
- case 1:
- _d.trys.push([1, 3, , 4]);
+ _c.label = 2;
+ case 2:
+ _c.trys.push([2, 4, , 5]);
  modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  modelSettings = {
  maxTokens: modelRequirements.maxTokens || 2000,
@@ -7445,9 +7579,9 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  [rawPromptContent],
  modelSettings,
  ];
- return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
- case 2:
- rawResponse = _d.sent();
+ return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
+ case 3:
+ rawResponse = _c.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
@@ -7479,10 +7613,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  rawResponse: rawResponse,
  // <- [🗯]
  }];
- case 3:
- error_2 = _d.sent();
+ case 4:
+ error_2 = _c.sent();
  throw this.transformAzureError(error_2);
- case 4: return [2 /*return*/];
+ case 5: return [2 /*return*/];
  }
  });
  });
@@ -7498,25 +7632,6 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  var code = azureError.code, message = azureError.message;
  return new PipelineExecutionError("".concat(code, ": ").concat(message));
  };
- /**
- * List all available Azure OpenAI models that can be used
- */
- AzureOpenAiExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- // TODO: !!! Do here some filtering which models are really available as deployment
- // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
- return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
- var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
- return ({
- modelTitle: "Azure ".concat(modelTitle),
- modelName: modelName,
- modelVariant: modelVariant,
- });
- })];
- });
- });
- };
  return AzureOpenAiExecutionTools;
  }());
  /**
@@ -7578,12 +7693,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  function OpenAiExecutionTools(options) {
  if (options === void 0) { options = {}; }
  this.options = options;
- // Note: Passing only OpenAI relevant options to OpenAI constructor
- var openAiOptions = __assign({}, options);
- delete openAiOptions.isVerbose;
- delete openAiOptions.user;
- this.client = new OpenAI(__assign({}, openAiOptions));
- // <- TODO: !!!!!! Lazy-load client
+ /**
+ * OpenAI API client.
+ */
+ this.client = null;
  }
  Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
  get: function () {
@@ -7599,12 +7712,54 @@ var OpenAiExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ OpenAiExecutionTools.prototype.getClient = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var openAiOptions;
+ return __generator(this, function (_a) {
+ if (this.client === null) {
+ openAiOptions = __assign({}, this.options);
+ delete openAiOptions.isVerbose;
+ delete openAiOptions.user;
+ this.client = new OpenAI(__assign({}, openAiOptions));
+ }
+ return [2 /*return*/, this.client];
+ });
+ });
+ };
+ /**
+ * Check the `options` passed to `constructor`
+ */
+ OpenAiExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.getClient()];
+ case 1:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ };
+ /**
+ * List all available OpenAI models that can be used
+ */
+ OpenAiExecutionTools.prototype.listModels = function () {
+ /*
+ Note: Dynamic lising of the models
+ const models = await this.openai.models.list({});
+
+ console.log({ models });
+ console.log(models.data);
+ */
+ return OPENAI_MODELS;
+ };
  /**
  * Calls OpenAI API to use a chat model.
  */
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -7612,6 +7767,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  console.info('💬 OpenAI callChatModel call', { prompt: prompt });
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -7648,8 +7806,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.chat.completions.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.chat.completions.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -7690,7 +7848,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -7698,6 +7856,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'COMPLETION') {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
@@ -7717,8 +7878,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.completions.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.completions.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -7756,7 +7917,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -7764,6 +7925,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  console.info('🖋 OpenAI embedding call', { prompt: prompt });
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'EMBEDDING') {
  throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
@@ -7778,8 +7942,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.embeddings.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -7845,20 +8009,6 @@ var OpenAiExecutionTools = /** @class */ (function () {
  OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
  return this.getDefaultModel('text-embedding-3-large');
  };
- // <- Note: [🤖] getDefaultXxxModel
- /**
- * List all available OpenAI models that can be used
- */
- OpenAiExecutionTools.prototype.listModels = function () {
- /*
- Note: Dynamic lising of the models
- const models = await this.openai.models.list({});
-
- console.log({ models });
- console.log(models.data);
- */
- return OPENAI_MODELS;
- };
  return OpenAiExecutionTools;
  }());
  /**
@@ -7959,7 +8109,7 @@ function createLlmToolsFromEnv(options) {
  var configuration = createLlmToolsFromConfigurationFromEnv();
  if (configuration.length === 0) {
  // TODO: [🥃]
- throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n !!!!!!!@@@@You have maybe forgotten to two things:\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
+ throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n !!!!!!!@@@@You have maybe forgotten to two things:\n !!!!!!! List all available LLM tools in your environment\n - Azure \n - OpenAI (not imported)\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
  }
  return createLlmToolsFromConfiguration(configuration, options);
  }
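Taken together, the visible API change in this release is a new `checkConfiguration(): Promise<void>` on every `LlmExecutionTools` implementation (the `LlmExecutionTools.d.ts +1 -0` typing change listed above), with the provider classes deferring client construction until first use. A hedged usage sketch (the import path and option names are assumptions inferred from the file list, not verified against the published typings):

    import { OpenAiExecutionTools } from '@promptbook/openai';

    async function main(): Promise<void> {
        const tools = new OpenAiExecutionTools({ apiKey: 'sk-...' /* <- placeholder */ });
        await tools.checkConfiguration(); // <- builds the lazy OpenAI client on first use
        console.info(await tools.listModels());
    }

    main();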