@promptbook/node 0.66.0-6 → 0.66.0-8

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (25)
  1. package/esm/index.es.js +316 -165
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/azure-openai.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/cli.index.d.ts +4 -2
  5. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  6. package/esm/typings/src/cli/main.d.ts +2 -2
  7. package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -0
  8. package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.test.d.ts +1 -1
  9. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +1 -1
  10. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.test.d.ts +1 -1
  11. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +10 -5
  13. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +10 -5
  14. package/esm/typings/src/llm-providers/azure-openai/createAzureOpenAiExecutionTools.d.ts +15 -0
  15. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +9 -0
  16. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +11 -0
  17. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +8 -4
  18. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +8 -4
  19. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -5
  20. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +10 -5
  21. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +8 -4
  22. package/esm/typings/src/personas/preparePersona.test.d.ts +1 -1
  23. package/package.json +2 -2
  24. package/umd/index.umd.js +316 -165
  25. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -17,7 +17,7 @@ import OpenAI from 'openai';
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.66.0-5';
+ var PROMPTBOOK_VERSION = '0.66.0-7';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /*! *****************************************************************************
@@ -696,7 +696,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-5",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-5",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-5",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-5",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1919,6 +1919,60 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ /**
+ * Check the configuration of all execution tools
+ */
+ MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/];
+ });
+ });
+ };
+ /**
+ * List all available models that can be used
+ * This lists is a combination of all available models from all execution tools
+ */
+ MultipleLlmExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
+ var e_1, _c;
+ return __generator(this, function (_d) {
+ switch (_d.label) {
+ case 0:
+ availableModels = [];
+ _d.label = 1;
+ case 1:
+ _d.trys.push([1, 6, 7, 8]);
+ _a = __values(this.llmExecutionTools), _b = _a.next();
+ _d.label = 2;
+ case 2:
+ if (!!_b.done) return [3 /*break*/, 5];
+ llmExecutionTools = _b.value;
+ return [4 /*yield*/, llmExecutionTools.listModels()];
+ case 3:
+ models = _d.sent();
+ availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
+ _d.label = 4;
+ case 4:
+ _b = _a.next();
+ return [3 /*break*/, 2];
+ case 5: return [3 /*break*/, 8];
+ case 6:
+ e_1_1 = _d.sent();
+ e_1 = { error: e_1_1 };
+ return [3 /*break*/, 8];
+ case 7:
+ try {
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
+ }
+ finally { if (e_1) throw e_1.error; }
+ return [7 /*endfinally*/];
+ case 8: return [2 /*return*/, availableModels];
+ }
+ });
+ });
+ };
  /**
  * Calls the best available chat model
  */
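Note: The compiled `__awaiter`/`__generator` state machine added above corresponds to roughly the following TypeScript source. This is a hand de-compilation for readability, not the package's actual source file; the `AvailableModel` type name is taken from the package's public typings.

    public async checkConfiguration(): Promise<void> {
        // Note: The compiled body resolves immediately without checking the wrapped tools
        return;
    }

    /**
     * List all available models that can be used
     * This list is a combination of all available models from all execution tools
     */
    public async listModels(): Promise<Array<AvailableModel>> {
        const availableModels: Array<AvailableModel> = [];
        for (const llmExecutionTools of this.llmExecutionTools) {
            const models = await llmExecutionTools.listModels();
            availableModels.push(...models);
        }
        return availableModels;
    }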
@@ -1945,8 +1999,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  */
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var errors, _a, _b, llmExecutionTools, _c, error_1, e_1_1;
- var e_1, _d;
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
+ var e_2, _d;
  var _this = this;
  return __generator(this, function (_e) {
  switch (_e.label) {
@@ -2002,14 +2056,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  return [3 /*break*/, 2];
  case 14: return [3 /*break*/, 17];
  case 15:
- e_1_1 = _e.sent();
- e_1 = { error: e_1_1 };
+ e_2_1 = _e.sent();
+ e_2 = { error: e_2_1 };
  return [3 /*break*/, 17];
  case 16:
  try {
  if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
  }
- finally { if (e_1) throw e_1.error; }
+ finally { if (e_2) throw e_2.error; }
  return [7 /*endfinally*/];
  case 17:
  if (errors.length === 1) {
@@ -2037,50 +2091,6 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
  });
  });
  };
- /**
- * List all available models that can be used
- * This lists is a combination of all available models from all execution tools
- */
- MultipleLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
- var e_2, _c;
- return __generator(this, function (_d) {
- switch (_d.label) {
- case 0:
- availableModels = [];
- _d.label = 1;
- case 1:
- _d.trys.push([1, 6, 7, 8]);
- _a = __values(this.llmExecutionTools), _b = _a.next();
- _d.label = 2;
- case 2:
- if (!!_b.done) return [3 /*break*/, 5];
- llmExecutionTools = _b.value;
- return [4 /*yield*/, llmExecutionTools.listModels()];
- case 3:
- models = _d.sent();
- availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
- _d.label = 4;
- case 4:
- _b = _a.next();
- return [3 /*break*/, 2];
- case 5: return [3 /*break*/, 8];
- case 6:
- e_2_1 = _d.sent();
- e_2 = { error: e_2_1 };
- return [3 /*break*/, 8];
- case 7:
- try {
- if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
- }
- finally { if (e_2) throw e_2.error; }
- return [7 /*endfinally*/];
- case 8: return [2 /*return*/, availableModels];
- }
- });
- });
- };
  return MultipleLlmExecutionTools;
  }());
  /**
@@ -3515,6 +3525,13 @@ function countTotalUsage(llmTools) {
  // TODO: [🧠] Maybe put here some suffix
  return llmTools.description;
  },
+ checkConfiguration: function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, /* not await */ llmTools.checkConfiguration()];
+ });
+ });
+ },
  listModels: function () {
  return /* not await */ llmTools.listModels();
  },
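Note: The usage-counting proxy returned by `countTotalUsage` now forwards the new `checkConfiguration()` method to the wrapped tools, returning the inner promise without awaiting it (hence the `/* not await */` marker). A simplified TypeScript sketch of the wrapper shape, assuming the `LlmExecutionTools` interface from the typings; the real proxy also wraps the call* methods to accumulate usage:

    const proxiedTools: LlmExecutionTools = {
        get title() {
            return llmTools.title;
        },
        get description() {
            return llmTools.description;
        },
        // Delegate straight through; the returned promise is not awaited here
        checkConfiguration: () => /* not await */ llmTools.checkConfiguration(),
        listModels: () => /* not await */ llmTools.listModels(),
        // ...callChatModel / callCompletionModel / callEmbeddingModel wrappers omitted
    };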
@@ -6286,6 +6303,20 @@ var EnvironmentMismatchError = /** @class */ (function (_super) {
  return EnvironmentMismatchError;
  }(Error));
 
+ /**
+ * @@@
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it access global
+ *
+ * @public exported from `@promptbook/utils`
+ */
+ function $getGlobalScope() {
+ return Function('return this')();
+ }
+ /***
+ * TODO: !!!!! Make private and promptbook registry from this
+ */
+
  /**
  * Register is @@@
  *
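Note: `Function('return this')()` is the classic way to obtain the global object in any JavaScript environment (`window` in browsers, `global` in Node.js, `self` in workers); unlike a bare `this` at module top level, it also works under strict mode and in ES modules, because function bodies created via `Function` are evaluated in sloppy mode. On ES2020+ runtimes it returns the same object as `globalThis`, so a sketch of an equivalent modern form would be:

    // Sketch, assuming an ES2020+ runtime:
    function $getGlobalScope(): typeof globalThis {
        return globalThis;
    }

Keeping registry state on the global object rather than in a module-level variable suggests the intent is that multiple bundled copies of the library in one process share a single register.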
@@ -6300,13 +6331,31 @@ var Register = /** @class */ (function () {
  return this.storage;
  };
  Register.prototype.register = function (registered) {
- // !!!!!! <- TODO: What to return here
- // TODO: !!!!!! Compare if same is not already registered
- this.storage.push(registered);
+ // <- TODO: What to return here
+ var packageName = registered.packageName, className = registered.className;
+ var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
+ var existingRegistration = this.storage[existingRegistrationIndex];
+ if (!existingRegistration) {
+ console.warn("[\uD83D\uDCE6] Registering ".concat(packageName, ".").concat(className, " again"));
+ this.storage.push(registered);
+ }
+ else {
+ console.warn("[\uD83D\uDCE6] Re-registering ".concat(packageName, ".").concat(className, " again"));
+ this.storage[existingRegistrationIndex] = registered;
+ }
  };
  return Register;
  }());
 
+ // TODO: !!!!!! Move this logic to Register and rename to $Register
+ var globalScope = $getGlobalScope();
+ if (globalScope.$llmToolsMetadataRegister === undefined) {
+ globalScope.$llmToolsMetadataRegister = [];
+ }
+ else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
+ throw new UnexpectedError("Expected $llmToolsMetadataRegister to be an array, but got ".concat(typeof globalScope.$llmToolsMetadataRegister));
+ }
+ var _ = globalScope.$llmToolsMetadataRegister;
  /**
  * @@@
  *
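Note: De-obfuscated, `Register.prototype.register` now de-duplicates by the `(packageName, className)` pair instead of blindly pushing. A TypeScript sketch of the same logic (the `Registered` item type is an assumption; `"\uD83D\uDCE6"` is the 📦 emoji; as in the compiled output, the warning text says "again" on both branches):

    register(registered: Registered): void {
        const { packageName, className } = registered;
        const existingRegistrationIndex = this.storage.findIndex(
            (item) => item.packageName === packageName && item.className === className,
        );
        if (existingRegistrationIndex === -1) {
            // First time this packageName.className pair is seen - append it
            console.warn(`[📦] Registering ${packageName}.${className} again`);
            this.storage.push(registered);
        } else {
            // Same pair seen before - replace the old entry in place
            console.warn(`[📦] Re-registering ${packageName}.${className} again`);
            this.storage[existingRegistrationIndex] = registered;
        }
    }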
@@ -6314,9 +6363,8 @@ var Register = /** @class */ (function () {
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
  * @public exported from `@promptbook/core`
  */
- var $llmToolsMetadataRegister = new Register([
- // TODO: !!!!!! Take from global scope
- ]);
+ var $llmToolsMetadataRegister = new Register(_);
+ $getGlobalScope().$llmToolsMetadataRegister;
 
  /**
  * @@@
@@ -6379,6 +6427,29 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ /**
+ * Check the configuration of all execution tools
+ */
+ RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/];
+ });
+ });
+ };
+ /**
+ * List all available models that can be used
+ */
+ RemoteLlmExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ return [2 /*return*/, (this.options.models ||
+ [
+ /* !!!!!! */
+ ])];
+ });
+ });
+ };
  /**
  * Creates a connection to the remote proxy server.
  */
@@ -6473,19 +6544,6 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
  });
  });
  };
- /**
- * List all available models that can be used
- */
- RemoteLlmExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- return [2 /*return*/, (this.options.models ||
- [
- /* !!! */
- ])];
- });
- });
- };
  return RemoteLlmExecutionTools;
  }());
  /**
@@ -6682,12 +6740,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  function AnthropicClaudeExecutionTools(options) {
  if (options === void 0) { options = { isProxied: false }; }
  this.options = options;
- // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
- var anthropicOptions = __assign({}, options);
- delete anthropicOptions.isVerbose;
- delete anthropicOptions.isProxied;
- this.client = new Anthropic(anthropicOptions);
- // <- TODO: !!!!!! Lazy-load client
+ /**
+ * Anthropic Claude API client.
+ */
+ this.client = null;
  }
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
  get: function () {
@@ -6703,12 +6759,47 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ AnthropicClaudeExecutionTools.prototype.getClient = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var anthropicOptions;
+ return __generator(this, function (_a) {
+ if (this.client === null) {
+ anthropicOptions = __assign({}, this.options);
+ delete anthropicOptions.isVerbose;
+ delete anthropicOptions.isProxied;
+ this.client = new Anthropic(anthropicOptions);
+ }
+ return [2 /*return*/, this.client];
+ });
+ });
+ };
+ /**
+ * Check the `options` passed to `constructor`
+ */
+ AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.getClient()];
+ case 1:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ };
+ /**
+ * List all available Anthropic Claude models that can be used
+ */
+ AnthropicClaudeExecutionTools.prototype.listModels = function () {
+ return ANTHROPIC_CLAUDE_MODELS;
+ };
  /**
  * Calls Anthropic Claude API to use a chat model.
  */
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
+ var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
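Note: The same lazy-initialization pattern is applied to all three direct providers in this release (Anthropic Claude, Azure OpenAI, OpenAI): the constructor only stores the options, and the SDK client is created on first use. A TypeScript sketch of the pattern for the Anthropic case, hand-written for readability with option types simplified:

    private client: Anthropic | null = null;

    private async getClient(): Promise<Anthropic> {
        if (this.client === null) {
            // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
            const anthropicOptions: Record<string, unknown> = { ...this.options };
            delete anthropicOptions.isVerbose;
            delete anthropicOptions.isProxied;
            this.client = new Anthropic(anthropicOptions);
        }
        return this.client;
    }

    /**
     * Check the `options` passed to `constructor`
     */
    public async checkConfiguration(): Promise<void> {
        // Constructing the client is what validates the options,
        // so checkConfiguration just forces the lazy construction
        await this.getClient();
    }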
@@ -6716,6 +6807,9 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  console.info('💬 Anthropic Claude callChatModel call');
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -6742,8 +6836,8 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.messages.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.messages.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -6874,13 +6968,6 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
  return this.getDefaultModel('claude-3-opus');
  };
- // <- Note: [🤖] getDefaultXxxModel
- /**
- * List all available Anthropic Claude models that can be used
- */
- AnthropicClaudeExecutionTools.prototype.listModels = function () {
- return ANTHROPIC_CLAUDE_MODELS;
- };
  return AnthropicClaudeExecutionTools;
  }());
  /**
@@ -7287,10 +7374,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  */
  function AzureOpenAiExecutionTools(options) {
  this.options = options;
- this.client = new OpenAIClient(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
- // <- TODO: !!!!!! Lazy-load client
+ /**
+ * OpenAI Azure API client.
+ */
+ this.client = null;
  }
  Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
  get: function () {
@@ -7306,28 +7393,74 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ AzureOpenAiExecutionTools.prototype.getClient = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ if (this.client === null) {
+ this.client = new OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(this.options.apiKey));
+ }
+ return [2 /*return*/, this.client];
+ });
+ });
+ };
+ /**
+ * Check the `options` passed to `constructor`
+ */
+ AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.getClient()];
+ case 1:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ };
+ /**
+ * List all available Azure OpenAI models that can be used
+ */
+ AzureOpenAiExecutionTools.prototype.listModels = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ // TODO: !!! Do here some filtering which models are really available as deployment
+ // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
+ return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
+ var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
+ return ({
+ modelTitle: "Azure ".concat(modelTitle),
+ modelName: modelName,
+ modelVariant: modelVariant,
+ });
+ })];
+ });
+ });
+ };
  /**
  * Calls OpenAI API to use a chat model.
  */
  AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
  var _a, _b;
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
- var _c;
- return __generator(this, function (_d) {
- switch (_d.label) {
+ var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
+ return __generator(this, function (_c) {
+ switch (_c.label) {
  case 0:
  if (this.options.isVerbose) {
  console.info('💬 OpenAI callChatModel call');
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _c.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
- _d.label = 1;
- case 1:
- _d.trys.push([1, 3, , 4]);
+ _c.label = 2;
+ case 2:
+ _c.trys.push([2, 4, , 5]);
  modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  modelSettings = {
  maxTokens: modelRequirements.maxTokens,
@@ -7357,9 +7490,9 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
  }
  rawRequest = [modelName, messages, modelSettings];
- return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
- case 2:
- rawResponse = _d.sent();
+ return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
+ case 3:
+ rawResponse = _c.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
@@ -7394,10 +7527,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  rawResponse: rawResponse,
  // <- [🗯]
  }];
- case 3:
- error_1 = _d.sent();
+ case 4:
+ error_1 = _c.sent();
  throw this.transformAzureError(error_1);
- case 4: return [2 /*return*/];
+ case 5: return [2 /*return*/];
  }
  });
  });
@@ -7408,22 +7541,24 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
  var _a, _b;
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
- var _c;
- return __generator(this, function (_d) {
- switch (_d.label) {
+ var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
+ return __generator(this, function (_c) {
+ switch (_c.label) {
  case 0:
  if (this.options.isVerbose) {
  console.info('🖋 OpenAI callCompletionModel call');
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _c.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'COMPLETION') {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
  }
- _d.label = 1;
- case 1:
- _d.trys.push([1, 3, , 4]);
+ _c.label = 2;
+ case 2:
+ _c.trys.push([2, 4, , 5]);
  modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
  modelSettings = {
  maxTokens: modelRequirements.maxTokens || 2000,
@@ -7445,9 +7580,9 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  [rawPromptContent],
  modelSettings,
  ];
- return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
- case 2:
- rawResponse = _d.sent();
+ return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
+ case 3:
+ rawResponse = _c.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
@@ -7479,10 +7614,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  rawResponse: rawResponse,
  // <- [🗯]
  }];
- case 3:
- error_2 = _d.sent();
+ case 4:
+ error_2 = _c.sent();
  throw this.transformAzureError(error_2);
- case 4: return [2 /*return*/];
+ case 5: return [2 /*return*/];
  }
  });
  });
@@ -7498,25 +7633,6 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  var code = azureError.code, message = azureError.message;
  return new PipelineExecutionError("".concat(code, ": ").concat(message));
  };
- /**
- * List all available Azure OpenAI models that can be used
- */
- AzureOpenAiExecutionTools.prototype.listModels = function () {
- return __awaiter(this, void 0, void 0, function () {
- return __generator(this, function (_a) {
- // TODO: !!! Do here some filtering which models are really available as deployment
- // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
- return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
- var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
- return ({
- modelTitle: "Azure ".concat(modelTitle),
- modelName: modelName,
- modelVariant: modelVariant,
- });
- })];
- });
- });
- };
  return AzureOpenAiExecutionTools;
  }());
  /**
@@ -7578,12 +7694,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
  function OpenAiExecutionTools(options) {
  if (options === void 0) { options = {}; }
  this.options = options;
- // Note: Passing only OpenAI relevant options to OpenAI constructor
- var openAiOptions = __assign({}, options);
- delete openAiOptions.isVerbose;
- delete openAiOptions.user;
- this.client = new OpenAI(__assign({}, openAiOptions));
- // <- TODO: !!!!!! Lazy-load client
+ /**
+ * OpenAI API client.
+ */
+ this.client = null;
  }
  Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
  get: function () {
@@ -7599,12 +7713,54 @@ var OpenAiExecutionTools = /** @class */ (function () {
  enumerable: false,
  configurable: true
  });
+ OpenAiExecutionTools.prototype.getClient = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ var openAiOptions;
+ return __generator(this, function (_a) {
+ if (this.client === null) {
+ openAiOptions = __assign({}, this.options);
+ delete openAiOptions.isVerbose;
+ delete openAiOptions.user;
+ this.client = new OpenAI(__assign({}, openAiOptions));
+ }
+ return [2 /*return*/, this.client];
+ });
+ });
+ };
+ /**
+ * Check the `options` passed to `constructor`
+ */
+ OpenAiExecutionTools.prototype.checkConfiguration = function () {
+ return __awaiter(this, void 0, void 0, function () {
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0: return [4 /*yield*/, this.getClient()];
+ case 1:
+ _a.sent();
+ return [2 /*return*/];
+ }
+ });
+ });
+ };
+ /**
+ * List all available OpenAI models that can be used
+ */
+ OpenAiExecutionTools.prototype.listModels = function () {
+ /*
+ Note: Dynamic lising of the models
+ const models = await this.openai.models.list({});
+
+ console.log({ models });
+ console.log(models.data);
+ */
+ return OPENAI_MODELS;
+ };
  /**
  * Calls OpenAI API to use a chat model.
  */
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -7612,6 +7768,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  console.info('💬 OpenAI callChatModel call', { prompt: prompt });
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -7648,8 +7807,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.chat.completions.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.chat.completions.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -7690,7 +7849,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -7698,6 +7857,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'COMPLETION') {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
@@ -7717,8 +7879,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.completions.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.completions.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -7756,7 +7918,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  */
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
  return __awaiter(this, void 0, void 0, function () {
- var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+ var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
  return __generator(this, function (_a) {
  switch (_a.label) {
  case 0:
@@ -7764,6 +7926,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
  console.info('🖋 OpenAI embedding call', { prompt: prompt });
  }
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ return [4 /*yield*/, this.getClient()];
+ case 1:
+ client = _a.sent();
  // TODO: [☂] Use here more modelRequirements
  if (modelRequirements.modelVariant !== 'EMBEDDING') {
  throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
@@ -7778,8 +7943,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
- case 1:
+ return [4 /*yield*/, client.embeddings.create(rawRequest)];
+ case 2:
  rawResponse = _a.sent();
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -7845,20 +8010,6 @@ var OpenAiExecutionTools = /** @class */ (function () {
  OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
  return this.getDefaultModel('text-embedding-3-large');
  };
- // <- Note: [🤖] getDefaultXxxModel
- /**
- * List all available OpenAI models that can be used
- */
- OpenAiExecutionTools.prototype.listModels = function () {
- /*
- Note: Dynamic lising of the models
- const models = await this.openai.models.list({});
-
- console.log({ models });
- console.log(models.data);
- */
- return OPENAI_MODELS;
- };
  return OpenAiExecutionTools;
  }());
  /**
@@ -7959,7 +8110,7 @@ function createLlmToolsFromEnv(options) {
  var configuration = createLlmToolsFromConfigurationFromEnv();
  if (configuration.length === 0) {
  // TODO: [🥃]
- throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n !!!!!!!@@@@You have maybe forgotten to two things:\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
+ throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n !!!!!!!@@@@You have maybe forgotten to two things:\n !!!!!!! List all available LLM tools in your environment\n - Azure \n - OpenAI (not imported)\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
  }
  return createLlmToolsFromConfiguration(configuration, options);
  }
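Note: With `checkConfiguration()` now part of the `LlmExecutionTools` interface (see file 7 in the list above), a consumer can validate its setup before running a pipeline. A hypothetical usage sketch in TypeScript; the `@promptbook/openai` import path and the shape of the constructor options are assumptions, only the class and method names come from this diff:

    import { OpenAiExecutionTools } from '@promptbook/openai';

    const llmTools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY });
    // Forces the lazy client construction; the OpenAI SDK throws here if the key is missing
    await llmTools.checkConfiguration();
    const models = await llmTools.listModels();
    console.info(models.map(({ modelTitle }) => modelTitle));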