@promptbook/cli 0.66.0-6 → 0.66.0-8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. package/bin/promptbook-cli.js +2 -2
  2. package/esm/index.es.js +368 -172
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/azure-openai.index.d.ts +4 -0
  5. package/esm/typings/src/_packages/cli.index.d.ts +4 -2
  6. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  7. package/esm/typings/src/cli/main.d.ts +2 -2
  8. package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -0
  9. package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.test.d.ts +1 -1
  10. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +1 -1
  11. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.test.d.ts +1 -1
  12. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +1 -0
  13. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +10 -5
  14. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +10 -5
  15. package/esm/typings/src/llm-providers/azure-openai/createAzureOpenAiExecutionTools.d.ts +15 -0
  16. package/esm/typings/src/llm-providers/azure-openai/register-configuration.d.ts +9 -0
  17. package/esm/typings/src/llm-providers/azure-openai/register-constructor.d.ts +11 -0
  18. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +8 -4
  19. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +8 -4
  20. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +9 -5
  21. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +10 -5
  22. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +8 -4
  23. package/esm/typings/src/personas/preparePersona.test.d.ts +1 -1
  24. package/package.json +1 -1
  25. package/umd/index.umd.js +369 -172
  26. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -20,7 +20,7 @@ import glob from 'glob-promise';
20
20
  /**
21
21
  * The version of the Promptbook library
22
22
  */
23
- var PROMPTBOOK_VERSION = '0.66.0-5';
23
+ var PROMPTBOOK_VERSION = '0.66.0-7';
24
24
  // TODO: !!!! List here all the versions and annotate + put into script
25
25
 
26
26
  /*! *****************************************************************************
@@ -847,7 +847,7 @@ function forEachAsync(array, options, callbackfunction) {
847
847
  });
848
848
  }
849
849
 
850
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-5",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-5",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-5",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-5",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
850
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-7",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
851
851
 
852
852
  /**
853
853
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2070,6 +2070,60 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
2070
2070
  enumerable: false,
2071
2071
  configurable: true
2072
2072
  });
2073
+ /**
2074
+ * Check the configuration of all execution tools
2075
+ */
2076
+ MultipleLlmExecutionTools.prototype.checkConfiguration = function () {
2077
+ return __awaiter(this, void 0, void 0, function () {
2078
+ return __generator(this, function (_a) {
2079
+ return [2 /*return*/];
2080
+ });
2081
+ });
2082
+ };
2083
+ /**
2084
+ * List all available models that can be used
2085
+ * This lists is a combination of all available models from all execution tools
2086
+ */
2087
+ MultipleLlmExecutionTools.prototype.listModels = function () {
2088
+ return __awaiter(this, void 0, void 0, function () {
2089
+ var availableModels, _a, _b, llmExecutionTools, models, e_1_1;
2090
+ var e_1, _c;
2091
+ return __generator(this, function (_d) {
2092
+ switch (_d.label) {
2093
+ case 0:
2094
+ availableModels = [];
2095
+ _d.label = 1;
2096
+ case 1:
2097
+ _d.trys.push([1, 6, 7, 8]);
2098
+ _a = __values(this.llmExecutionTools), _b = _a.next();
2099
+ _d.label = 2;
2100
+ case 2:
2101
+ if (!!_b.done) return [3 /*break*/, 5];
2102
+ llmExecutionTools = _b.value;
2103
+ return [4 /*yield*/, llmExecutionTools.listModels()];
2104
+ case 3:
2105
+ models = _d.sent();
2106
+ availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
2107
+ _d.label = 4;
2108
+ case 4:
2109
+ _b = _a.next();
2110
+ return [3 /*break*/, 2];
2111
+ case 5: return [3 /*break*/, 8];
2112
+ case 6:
2113
+ e_1_1 = _d.sent();
2114
+ e_1 = { error: e_1_1 };
2115
+ return [3 /*break*/, 8];
2116
+ case 7:
2117
+ try {
2118
+ if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
2119
+ }
2120
+ finally { if (e_1) throw e_1.error; }
2121
+ return [7 /*endfinally*/];
2122
+ case 8: return [2 /*return*/, availableModels];
2123
+ }
2124
+ });
2125
+ });
2126
+ };
2073
2127
  /**
2074
2128
  * Calls the best available chat model
2075
2129
  */
@@ -2096,8 +2150,8 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
2096
2150
  */
2097
2151
  MultipleLlmExecutionTools.prototype.callCommonModel = function (prompt) {
2098
2152
  return __awaiter(this, void 0, void 0, function () {
2099
- var errors, _a, _b, llmExecutionTools, _c, error_1, e_1_1;
2100
- var e_1, _d;
2153
+ var errors, _a, _b, llmExecutionTools, _c, error_1, e_2_1;
2154
+ var e_2, _d;
2101
2155
  var _this = this;
2102
2156
  return __generator(this, function (_e) {
2103
2157
  switch (_e.label) {
@@ -2153,14 +2207,14 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
2153
2207
  return [3 /*break*/, 2];
2154
2208
  case 14: return [3 /*break*/, 17];
2155
2209
  case 15:
2156
- e_1_1 = _e.sent();
2157
- e_1 = { error: e_1_1 };
2210
+ e_2_1 = _e.sent();
2211
+ e_2 = { error: e_2_1 };
2158
2212
  return [3 /*break*/, 17];
2159
2213
  case 16:
2160
2214
  try {
2161
2215
  if (_b && !_b.done && (_d = _a.return)) _d.call(_a);
2162
2216
  }
2163
- finally { if (e_1) throw e_1.error; }
2217
+ finally { if (e_2) throw e_2.error; }
2164
2218
  return [7 /*endfinally*/];
2165
2219
  case 17:
2166
2220
  if (errors.length === 1) {
@@ -2188,50 +2242,6 @@ var MultipleLlmExecutionTools = /** @class */ (function () {
2188
2242
  });
2189
2243
  });
2190
2244
  };
2191
- /**
2192
- * List all available models that can be used
2193
- * This lists is a combination of all available models from all execution tools
2194
- */
2195
- MultipleLlmExecutionTools.prototype.listModels = function () {
2196
- return __awaiter(this, void 0, void 0, function () {
2197
- var availableModels, _a, _b, llmExecutionTools, models, e_2_1;
2198
- var e_2, _c;
2199
- return __generator(this, function (_d) {
2200
- switch (_d.label) {
2201
- case 0:
2202
- availableModels = [];
2203
- _d.label = 1;
2204
- case 1:
2205
- _d.trys.push([1, 6, 7, 8]);
2206
- _a = __values(this.llmExecutionTools), _b = _a.next();
2207
- _d.label = 2;
2208
- case 2:
2209
- if (!!_b.done) return [3 /*break*/, 5];
2210
- llmExecutionTools = _b.value;
2211
- return [4 /*yield*/, llmExecutionTools.listModels()];
2212
- case 3:
2213
- models = _d.sent();
2214
- availableModels.push.apply(availableModels, __spreadArray([], __read(models), false));
2215
- _d.label = 4;
2216
- case 4:
2217
- _b = _a.next();
2218
- return [3 /*break*/, 2];
2219
- case 5: return [3 /*break*/, 8];
2220
- case 6:
2221
- e_2_1 = _d.sent();
2222
- e_2 = { error: e_2_1 };
2223
- return [3 /*break*/, 8];
2224
- case 7:
2225
- try {
2226
- if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
2227
- }
2228
- finally { if (e_2) throw e_2.error; }
2229
- return [7 /*endfinally*/];
2230
- case 8: return [2 /*return*/, availableModels];
2231
- }
2232
- });
2233
- });
2234
- };
2235
2245
  return MultipleLlmExecutionTools;
2236
2246
  }());
2237
2247
  /**
@@ -3666,6 +3676,13 @@ function countTotalUsage(llmTools) {
3666
3676
  // TODO: [🧠] Maybe put here some suffix
3667
3677
  return llmTools.description;
3668
3678
  },
3679
+ checkConfiguration: function () {
3680
+ return __awaiter(this, void 0, void 0, function () {
3681
+ return __generator(this, function (_a) {
3682
+ return [2 /*return*/, /* not await */ llmTools.checkConfiguration()];
3683
+ });
3684
+ });
3685
+ },
3669
3686
  listModels: function () {
3670
3687
  return /* not await */ llmTools.listModels();
3671
3688
  },
@@ -6613,6 +6630,29 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
6613
6630
  enumerable: false,
6614
6631
  configurable: true
6615
6632
  });
6633
+ /**
6634
+ * Check the configuration of all execution tools
6635
+ */
6636
+ RemoteLlmExecutionTools.prototype.checkConfiguration = function () {
6637
+ return __awaiter(this, void 0, void 0, function () {
6638
+ return __generator(this, function (_a) {
6639
+ return [2 /*return*/];
6640
+ });
6641
+ });
6642
+ };
6643
+ /**
6644
+ * List all available models that can be used
6645
+ */
6646
+ RemoteLlmExecutionTools.prototype.listModels = function () {
6647
+ return __awaiter(this, void 0, void 0, function () {
6648
+ return __generator(this, function (_a) {
6649
+ return [2 /*return*/, (this.options.models ||
6650
+ [
6651
+ /* !!!!!! */
6652
+ ])];
6653
+ });
6654
+ });
6655
+ };
6616
6656
  /**
6617
6657
  * Creates a connection to the remote proxy server.
6618
6658
  */
@@ -6707,19 +6747,6 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
6707
6747
  });
6708
6748
  });
6709
6749
  };
6710
- /**
6711
- * List all available models that can be used
6712
- */
6713
- RemoteLlmExecutionTools.prototype.listModels = function () {
6714
- return __awaiter(this, void 0, void 0, function () {
6715
- return __generator(this, function (_a) {
6716
- return [2 /*return*/, (this.options.models ||
6717
- [
6718
- /* !!! */
6719
- ])];
6720
- });
6721
- });
6722
- };
6723
6750
  return RemoteLlmExecutionTools;
6724
6751
  }());
6725
6752
  /**
@@ -6916,12 +6943,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6916
6943
  function AnthropicClaudeExecutionTools(options) {
6917
6944
  if (options === void 0) { options = { isProxied: false }; }
6918
6945
  this.options = options;
6919
- // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
6920
- var anthropicOptions = __assign({}, options);
6921
- delete anthropicOptions.isVerbose;
6922
- delete anthropicOptions.isProxied;
6923
- this.client = new Anthropic(anthropicOptions);
6924
- // <- TODO: !!!!!! Lazy-load client
6946
+ /**
6947
+ * Anthropic Claude API client.
6948
+ */
6949
+ this.client = null;
6925
6950
  }
6926
6951
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
6927
6952
  get: function () {
@@ -6937,12 +6962,47 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6937
6962
  enumerable: false,
6938
6963
  configurable: true
6939
6964
  });
6965
+ AnthropicClaudeExecutionTools.prototype.getClient = function () {
6966
+ return __awaiter(this, void 0, void 0, function () {
6967
+ var anthropicOptions;
6968
+ return __generator(this, function (_a) {
6969
+ if (this.client === null) {
6970
+ anthropicOptions = __assign({}, this.options);
6971
+ delete anthropicOptions.isVerbose;
6972
+ delete anthropicOptions.isProxied;
6973
+ this.client = new Anthropic(anthropicOptions);
6974
+ }
6975
+ return [2 /*return*/, this.client];
6976
+ });
6977
+ });
6978
+ };
6979
+ /**
6980
+ * Check the `options` passed to `constructor`
6981
+ */
6982
+ AnthropicClaudeExecutionTools.prototype.checkConfiguration = function () {
6983
+ return __awaiter(this, void 0, void 0, function () {
6984
+ return __generator(this, function (_a) {
6985
+ switch (_a.label) {
6986
+ case 0: return [4 /*yield*/, this.getClient()];
6987
+ case 1:
6988
+ _a.sent();
6989
+ return [2 /*return*/];
6990
+ }
6991
+ });
6992
+ });
6993
+ };
6994
+ /**
6995
+ * List all available Anthropic Claude models that can be used
6996
+ */
6997
+ AnthropicClaudeExecutionTools.prototype.listModels = function () {
6998
+ return ANTHROPIC_CLAUDE_MODELS;
6999
+ };
6940
7000
  /**
6941
7001
  * Calls Anthropic Claude API to use a chat model.
6942
7002
  */
6943
7003
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
6944
7004
  return __awaiter(this, void 0, void 0, function () {
6945
- var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
7005
+ var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
6946
7006
  return __generator(this, function (_a) {
6947
7007
  switch (_a.label) {
6948
7008
  case 0:
@@ -6950,6 +7010,9 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6950
7010
  console.info('💬 Anthropic Claude callChatModel call');
6951
7011
  }
6952
7012
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7013
+ return [4 /*yield*/, this.getClient()];
7014
+ case 1:
7015
+ client = _a.sent();
6953
7016
  // TODO: [☂] Use here more modelRequirements
6954
7017
  if (modelRequirements.modelVariant !== 'CHAT') {
6955
7018
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -6976,8 +7039,8 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6976
7039
  if (this.options.isVerbose) {
6977
7040
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
6978
7041
  }
6979
- return [4 /*yield*/, this.client.messages.create(rawRequest)];
6980
- case 1:
7042
+ return [4 /*yield*/, client.messages.create(rawRequest)];
7043
+ case 2:
6981
7044
  rawResponse = _a.sent();
6982
7045
  if (this.options.isVerbose) {
6983
7046
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -7108,13 +7171,6 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
7108
7171
  AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
7109
7172
  return this.getDefaultModel('claude-3-opus');
7110
7173
  };
7111
- // <- Note: [🤖] getDefaultXxxModel
7112
- /**
7113
- * List all available Anthropic Claude models that can be used
7114
- */
7115
- AnthropicClaudeExecutionTools.prototype.listModels = function () {
7116
- return ANTHROPIC_CLAUDE_MODELS;
7117
- };
7118
7174
  return AnthropicClaudeExecutionTools;
7119
7175
  }());
7120
7176
  /**
@@ -7521,10 +7577,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7521
7577
  */
7522
7578
  function AzureOpenAiExecutionTools(options) {
7523
7579
  this.options = options;
7524
- this.client = new OpenAIClient(
7525
- // <- TODO: [🧱] Implement in a functional (not new Class) way
7526
- "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
7527
- // <- TODO: !!!!!! Lazy-load client
7580
+ /**
7581
+ * OpenAI Azure API client.
7582
+ */
7583
+ this.client = null;
7528
7584
  }
7529
7585
  Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
7530
7586
  get: function () {
@@ -7540,28 +7596,74 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7540
7596
  enumerable: false,
7541
7597
  configurable: true
7542
7598
  });
7599
+ AzureOpenAiExecutionTools.prototype.getClient = function () {
7600
+ return __awaiter(this, void 0, void 0, function () {
7601
+ return __generator(this, function (_a) {
7602
+ if (this.client === null) {
7603
+ this.client = new OpenAIClient("https://".concat(this.options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(this.options.apiKey));
7604
+ }
7605
+ return [2 /*return*/, this.client];
7606
+ });
7607
+ });
7608
+ };
7609
+ /**
7610
+ * Check the `options` passed to `constructor`
7611
+ */
7612
+ AzureOpenAiExecutionTools.prototype.checkConfiguration = function () {
7613
+ return __awaiter(this, void 0, void 0, function () {
7614
+ return __generator(this, function (_a) {
7615
+ switch (_a.label) {
7616
+ case 0: return [4 /*yield*/, this.getClient()];
7617
+ case 1:
7618
+ _a.sent();
7619
+ return [2 /*return*/];
7620
+ }
7621
+ });
7622
+ });
7623
+ };
7624
+ /**
7625
+ * List all available Azure OpenAI models that can be used
7626
+ */
7627
+ AzureOpenAiExecutionTools.prototype.listModels = function () {
7628
+ return __awaiter(this, void 0, void 0, function () {
7629
+ return __generator(this, function (_a) {
7630
+ // TODO: !!! Do here some filtering which models are really available as deployment
7631
+ // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
7632
+ return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
7633
+ var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
7634
+ return ({
7635
+ modelTitle: "Azure ".concat(modelTitle),
7636
+ modelName: modelName,
7637
+ modelVariant: modelVariant,
7638
+ });
7639
+ })];
7640
+ });
7641
+ });
7642
+ };
7543
7643
  /**
7544
7644
  * Calls OpenAI API to use a chat model.
7545
7645
  */
7546
7646
  AzureOpenAiExecutionTools.prototype.callChatModel = function (prompt) {
7547
7647
  var _a, _b;
7548
7648
  return __awaiter(this, void 0, void 0, function () {
7549
- var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
7550
- var _c;
7551
- return __generator(this, function (_d) {
7552
- switch (_d.label) {
7649
+ var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, messages, start, complete, rawRequest, rawResponse, resultContent, usage, error_1;
7650
+ return __generator(this, function (_c) {
7651
+ switch (_c.label) {
7553
7652
  case 0:
7554
7653
  if (this.options.isVerbose) {
7555
7654
  console.info('💬 OpenAI callChatModel call');
7556
7655
  }
7557
7656
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7657
+ return [4 /*yield*/, this.getClient()];
7658
+ case 1:
7659
+ client = _c.sent();
7558
7660
  // TODO: [☂] Use here more modelRequirements
7559
7661
  if (modelRequirements.modelVariant !== 'CHAT') {
7560
7662
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
7561
7663
  }
7562
- _d.label = 1;
7563
- case 1:
7564
- _d.trys.push([1, 3, , 4]);
7664
+ _c.label = 2;
7665
+ case 2:
7666
+ _c.trys.push([2, 4, , 5]);
7565
7667
  modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
7566
7668
  modelSettings = {
7567
7669
  maxTokens: modelRequirements.maxTokens,
@@ -7591,9 +7693,9 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7591
7693
  console.info(colors.bgWhite('messages'), JSON.stringify(messages, null, 4));
7592
7694
  }
7593
7695
  rawRequest = [modelName, messages, modelSettings];
7594
- return [4 /*yield*/, (_c = this.client).getChatCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
7595
- case 2:
7596
- rawResponse = _d.sent();
7696
+ return [4 /*yield*/, client.getChatCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
7697
+ case 3:
7698
+ rawResponse = _c.sent();
7597
7699
  if (this.options.isVerbose) {
7598
7700
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7599
7701
  }
@@ -7628,10 +7730,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7628
7730
  rawResponse: rawResponse,
7629
7731
  // <- [🗯]
7630
7732
  }];
7631
- case 3:
7632
- error_1 = _d.sent();
7733
+ case 4:
7734
+ error_1 = _c.sent();
7633
7735
  throw this.transformAzureError(error_1);
7634
- case 4: return [2 /*return*/];
7736
+ case 5: return [2 /*return*/];
7635
7737
  }
7636
7738
  });
7637
7739
  });
@@ -7642,22 +7744,24 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7642
7744
  AzureOpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
7643
7745
  var _a, _b;
7644
7746
  return __awaiter(this, void 0, void 0, function () {
7645
- var content, parameters, modelRequirements, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
7646
- var _c;
7647
- return __generator(this, function (_d) {
7648
- switch (_d.label) {
7747
+ var content, parameters, modelRequirements, client, modelName, modelSettings, start, complete, rawPromptContent, rawRequest, rawResponse, resultContent, usage, error_2;
7748
+ return __generator(this, function (_c) {
7749
+ switch (_c.label) {
7649
7750
  case 0:
7650
7751
  if (this.options.isVerbose) {
7651
7752
  console.info('🖋 OpenAI callCompletionModel call');
7652
7753
  }
7653
7754
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
7755
+ return [4 /*yield*/, this.getClient()];
7756
+ case 1:
7757
+ client = _c.sent();
7654
7758
  // TODO: [☂] Use here more modelRequirements
7655
7759
  if (modelRequirements.modelVariant !== 'COMPLETION') {
7656
7760
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
7657
7761
  }
7658
- _d.label = 1;
7659
- case 1:
7660
- _d.trys.push([1, 3, , 4]);
7762
+ _c.label = 2;
7763
+ case 2:
7764
+ _c.trys.push([2, 4, , 5]);
7661
7765
  modelName = prompt.modelRequirements.modelName || this.options.deploymentName;
7662
7766
  modelSettings = {
7663
7767
  maxTokens: modelRequirements.maxTokens || 2000,
@@ -7679,9 +7783,9 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7679
7783
  [rawPromptContent],
7680
7784
  modelSettings,
7681
7785
  ];
7682
- return [4 /*yield*/, (_c = this.client).getCompletions.apply(_c, __spreadArray([], __read(rawRequest), false))];
7683
- case 2:
7684
- rawResponse = _d.sent();
7786
+ return [4 /*yield*/, client.getCompletions.apply(client, __spreadArray([], __read(rawRequest), false))];
7787
+ case 3:
7788
+ rawResponse = _c.sent();
7685
7789
  if (this.options.isVerbose) {
7686
7790
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
7687
7791
  }
@@ -7713,10 +7817,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7713
7817
  rawResponse: rawResponse,
7714
7818
  // <- [🗯]
7715
7819
  }];
7716
- case 3:
7717
- error_2 = _d.sent();
7820
+ case 4:
7821
+ error_2 = _c.sent();
7718
7822
  throw this.transformAzureError(error_2);
7719
- case 4: return [2 /*return*/];
7823
+ case 5: return [2 /*return*/];
7720
7824
  }
7721
7825
  });
7722
7826
  });
@@ -7732,25 +7836,6 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7732
7836
  var code = azureError.code, message = azureError.message;
7733
7837
  return new PipelineExecutionError("".concat(code, ": ").concat(message));
7734
7838
  };
7735
- /**
7736
- * List all available Azure OpenAI models that can be used
7737
- */
7738
- AzureOpenAiExecutionTools.prototype.listModels = function () {
7739
- return __awaiter(this, void 0, void 0, function () {
7740
- return __generator(this, function (_a) {
7741
- // TODO: !!! Do here some filtering which models are really available as deployment
7742
- // @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01
7743
- return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
7744
- var modelTitle = _a.modelTitle, modelName = _a.modelName, modelVariant = _a.modelVariant;
7745
- return ({
7746
- modelTitle: "Azure ".concat(modelTitle),
7747
- modelName: modelName,
7748
- modelVariant: modelVariant,
7749
- });
7750
- })];
7751
- });
7752
- });
7753
- };
7754
7839
  return AzureOpenAiExecutionTools;
7755
7840
  }());
7756
7841
  /**
@@ -7812,12 +7897,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
7812
7897
  function OpenAiExecutionTools(options) {
7813
7898
  if (options === void 0) { options = {}; }
7814
7899
  this.options = options;
7815
- // Note: Passing only OpenAI relevant options to OpenAI constructor
7816
- var openAiOptions = __assign({}, options);
7817
- delete openAiOptions.isVerbose;
7818
- delete openAiOptions.user;
7819
- this.client = new OpenAI(__assign({}, openAiOptions));
7820
- // <- TODO: !!!!!! Lazy-load client
7900
+ /**
7901
+ * OpenAI API client.
7902
+ */
7903
+ this.client = null;
7821
7904
  }
7822
7905
  Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
7823
7906
  get: function () {
@@ -7833,12 +7916,54 @@ var OpenAiExecutionTools = /** @class */ (function () {
7833
7916
  enumerable: false,
7834
7917
  configurable: true
7835
7918
  });
7919
+ OpenAiExecutionTools.prototype.getClient = function () {
7920
+ return __awaiter(this, void 0, void 0, function () {
7921
+ var openAiOptions;
7922
+ return __generator(this, function (_a) {
7923
+ if (this.client === null) {
7924
+ openAiOptions = __assign({}, this.options);
7925
+ delete openAiOptions.isVerbose;
7926
+ delete openAiOptions.user;
7927
+ this.client = new OpenAI(__assign({}, openAiOptions));
7928
+ }
7929
+ return [2 /*return*/, this.client];
7930
+ });
7931
+ });
7932
+ };
7933
+ /**
7934
+ * Check the `options` passed to `constructor`
7935
+ */
7936
+ OpenAiExecutionTools.prototype.checkConfiguration = function () {
7937
+ return __awaiter(this, void 0, void 0, function () {
7938
+ return __generator(this, function (_a) {
7939
+ switch (_a.label) {
7940
+ case 0: return [4 /*yield*/, this.getClient()];
7941
+ case 1:
7942
+ _a.sent();
7943
+ return [2 /*return*/];
7944
+ }
7945
+ });
7946
+ });
7947
+ };
7948
+ /**
7949
+ * List all available OpenAI models that can be used
7950
+ */
7951
+ OpenAiExecutionTools.prototype.listModels = function () {
7952
+ /*
7953
+ Note: Dynamic lising of the models
7954
+ const models = await this.openai.models.list({});
7955
+
7956
+ console.log({ models });
7957
+ console.log(models.data);
7958
+ */
7959
+ return OPENAI_MODELS;
7960
+ };
7836
7961
  /**
7837
7962
  * Calls OpenAI API to use a chat model.
7838
7963
  */
7839
7964
  OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
7840
7965
  return __awaiter(this, void 0, void 0, function () {
7841
- var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
7966
+ var content, parameters, modelRequirements, expectFormat, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
7842
7967
  return __generator(this, function (_a) {
7843
7968
  switch (_a.label) {
7844
7969
  case 0:
@@ -7846,6 +7971,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
7846
7971
  console.info('💬 OpenAI callChatModel call', { prompt: prompt });
7847
7972
  }
7848
7973
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
7974
+ return [4 /*yield*/, this.getClient()];
7975
+ case 1:
7976
+ client = _a.sent();
7849
7977
  // TODO: [☂] Use here more modelRequirements
7850
7978
  if (modelRequirements.modelVariant !== 'CHAT') {
7851
7979
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
@@ -7882,8 +8010,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
7882
8010
  if (this.options.isVerbose) {
7883
8011
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
7884
8012
  }
7885
- return [4 /*yield*/, this.client.chat.completions.create(rawRequest)];
7886
- case 1:
8013
+ return [4 /*yield*/, client.chat.completions.create(rawRequest)];
8014
+ case 2:
7887
8015
  rawResponse = _a.sent();
7888
8016
  if (this.options.isVerbose) {
7889
8017
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -7924,7 +8052,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
7924
8052
  */
7925
8053
  OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
7926
8054
  return __awaiter(this, void 0, void 0, function () {
7927
- var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
8055
+ var content, parameters, modelRequirements, client, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
7928
8056
  return __generator(this, function (_a) {
7929
8057
  switch (_a.label) {
7930
8058
  case 0:
@@ -7932,6 +8060,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
7932
8060
  console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
7933
8061
  }
7934
8062
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
8063
+ return [4 /*yield*/, this.getClient()];
8064
+ case 1:
8065
+ client = _a.sent();
7935
8066
  // TODO: [☂] Use here more modelRequirements
7936
8067
  if (modelRequirements.modelVariant !== 'COMPLETION') {
7937
8068
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
@@ -7951,8 +8082,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
7951
8082
  if (this.options.isVerbose) {
7952
8083
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
7953
8084
  }
7954
- return [4 /*yield*/, this.client.completions.create(rawRequest)];
7955
- case 1:
8085
+ return [4 /*yield*/, client.completions.create(rawRequest)];
8086
+ case 2:
7956
8087
  rawResponse = _a.sent();
7957
8088
  if (this.options.isVerbose) {
7958
8089
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -7990,7 +8121,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
7990
8121
  */
7991
8122
  OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
7992
8123
  return __awaiter(this, void 0, void 0, function () {
7993
- var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
8124
+ var content, parameters, modelRequirements, client, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
7994
8125
  return __generator(this, function (_a) {
7995
8126
  switch (_a.label) {
7996
8127
  case 0:
@@ -7998,6 +8129,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
7998
8129
  console.info('🖋 OpenAI embedding call', { prompt: prompt });
7999
8130
  }
8000
8131
  content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
8132
+ return [4 /*yield*/, this.getClient()];
8133
+ case 1:
8134
+ client = _a.sent();
8001
8135
  // TODO: [☂] Use here more modelRequirements
8002
8136
  if (modelRequirements.modelVariant !== 'EMBEDDING') {
8003
8137
  throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
@@ -8012,8 +8146,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
8012
8146
  if (this.options.isVerbose) {
8013
8147
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
8014
8148
  }
8015
- return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
8016
- case 1:
8149
+ return [4 /*yield*/, client.embeddings.create(rawRequest)];
8150
+ case 2:
8017
8151
  rawResponse = _a.sent();
8018
8152
  if (this.options.isVerbose) {
8019
8153
  console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
@@ -8079,20 +8213,6 @@ var OpenAiExecutionTools = /** @class */ (function () {
8079
8213
  OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
8080
8214
  return this.getDefaultModel('text-embedding-3-large');
8081
8215
  };
8082
- // <- Note: [🤖] getDefaultXxxModel
8083
- /**
8084
- * List all available OpenAI models that can be used
8085
- */
8086
- OpenAiExecutionTools.prototype.listModels = function () {
8087
- /*
8088
- Note: Dynamic lising of the models
8089
- const models = await this.openai.models.list({});
8090
-
8091
- console.log({ models });
8092
- console.log(models.data);
8093
- */
8094
- return OPENAI_MODELS;
8095
- };
8096
8216
  return OpenAiExecutionTools;
8097
8217
  }());
8098
8218
  /**
@@ -8171,6 +8291,20 @@ function createLlmToolsFromConfiguration(configuration, options) {
8171
8291
  * TODO: This should be maybe not under `_common` but under `utils`
8172
8292
  */
8173
8293
 
8294
+ /**
8295
+ * @@@
8296
+ *
8297
+ * Note: `$` is used to indicate that this function is not a pure function - it access global
8298
+ *
8299
+ * @public exported from `@promptbook/utils`
8300
+ */
8301
+ function $getGlobalScope() {
8302
+ return Function('return this')();
8303
+ }
8304
+ /***
8305
+ * TODO: !!!!! Make private and promptbook registry from this
8306
+ */
8307
+
8174
8308
  /**
8175
8309
  * Register is @@@
8176
8310
  *
@@ -8185,13 +8319,31 @@ var Register = /** @class */ (function () {
8185
8319
  return this.storage;
8186
8320
  };
8187
8321
  Register.prototype.register = function (registered) {
8188
- // !!!!!! <- TODO: What to return here
8189
- // TODO: !!!!!! Compare if same is not already registered
8190
- this.storage.push(registered);
8322
+ // <- TODO: What to return here
8323
+ var packageName = registered.packageName, className = registered.className;
8324
+ var existingRegistrationIndex = this.storage.findIndex(function (item) { return item.packageName === packageName && item.className === className; });
8325
+ var existingRegistration = this.storage[existingRegistrationIndex];
8326
+ if (!existingRegistration) {
8327
+ console.warn("[\uD83D\uDCE6] Registering ".concat(packageName, ".").concat(className, " again"));
8328
+ this.storage.push(registered);
8329
+ }
8330
+ else {
8331
+ console.warn("[\uD83D\uDCE6] Re-registering ".concat(packageName, ".").concat(className, " again"));
8332
+ this.storage[existingRegistrationIndex] = registered;
8333
+ }
8191
8334
  };
8192
8335
  return Register;
8193
8336
  }());
8194
8337
 
8338
+ // TODO: !!!!!! Move this logic to Register and rename to $Register
8339
+ var globalScope = $getGlobalScope();
8340
+ if (globalScope.$llmToolsMetadataRegister === undefined) {
8341
+ globalScope.$llmToolsMetadataRegister = [];
8342
+ }
8343
+ else if (!Array.isArray(globalScope.$llmToolsMetadataRegister)) {
8344
+ throw new UnexpectedError("Expected $llmToolsMetadataRegister to be an array, but got ".concat(typeof globalScope.$llmToolsMetadataRegister));
8345
+ }
8346
+ var _ = globalScope.$llmToolsMetadataRegister;
8195
8347
  /**
8196
8348
  * @@@
8197
8349
  *
@@ -8199,9 +8351,8 @@ var Register = /** @class */ (function () {
8199
8351
  * @singleton Only one instance of each register is created per build, but thare can be more @@@
8200
8352
  * @public exported from `@promptbook/core`
8201
8353
  */
8202
- var $llmToolsMetadataRegister = new Register([
8203
- // TODO: !!!!!! Take from global scope
8204
- ]);
8354
+ var $llmToolsMetadataRegister = new Register(_);
8355
+ $getGlobalScope().$llmToolsMetadataRegister;
8205
8356
 
8206
8357
  /**
8207
8358
  * @@@
@@ -8258,7 +8409,7 @@ function createLlmToolsFromEnv(options) {
8258
8409
  var configuration = createLlmToolsFromConfigurationFromEnv();
8259
8410
  if (configuration.length === 0) {
8260
8411
  // TODO: [🥃]
8261
- throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n !!!!!!!@@@@You have maybe forgotten to two things:\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
8412
+ throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n !!!!!!!@@@@You have maybe forgotten to two things:\n !!!!!!! List all available LLM tools in your environment\n - Azure \n - OpenAI (not imported)\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
8262
8413
  }
8263
8414
  return createLlmToolsFromConfiguration(configuration, options);
8264
8415
  }
@@ -8476,7 +8627,6 @@ function getLlmToolsForCli(options) {
8476
8627
  * TODO: This should be maybe not under `_common` but under `utils-internal` / `utils/internal`
8477
8628
  */
8478
8629
 
8479
- // TODO: !!!!!! Probbably all LLM tools should be registered in `@promptbook/cli`
8480
8630
  /**
8481
8631
  * Initializes `make` command for Promptbook CLI utilities
8482
8632
  *
@@ -8988,9 +9138,9 @@ function promptbookCli() {
8988
9138
  *
8989
9139
  * @public exported from `@promptbook/cli`
8990
9140
  */
8991
- var __CLI = {
9141
+ var _CLI = {
8992
9142
  // Note: [🥠]
8993
- __initialize: promptbookCli,
9143
+ _initialize: promptbookCli,
8994
9144
  };
8995
9145
  /**
8996
9146
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
@@ -9036,6 +9186,52 @@ var _AnthropicClaudeMetadataRegistration = $llmToolsMetadataRegister.register({
9036
9186
  },
9037
9187
  });
9038
9188
 
9189
+ /**
9190
+ * @@@ registration1 of default configuration for Azure Open AI
9191
+ *
9192
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
9193
+ *
9194
+ * @public exported from `@promptbook/core`
9195
+ * @public exported from `@promptbook/cli`
9196
+ */
9197
+ var _AzureOpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
9198
+ title: 'Azure Open AI',
9199
+ packageName: '@promptbook/azure-openai',
9200
+ className: 'AzureOpenAiExecutionTools',
9201
+ getBoilerplateConfiguration: function () {
9202
+ return {
9203
+ title: 'Azure Open AI (boilerplate)',
9204
+ packageName: '@promptbook/azure-openai',
9205
+ className: 'AzureOpenAiExecutionTools',
9206
+ options: {
9207
+ apiKey: 'sk-',
9208
+ },
9209
+ };
9210
+ },
9211
+ createConfigurationFromEnv: function (env) {
9212
+ if (typeof env.AZUREOPENAI_RESOURCE_NAME === 'string' &&
9213
+ typeof env.AZUREOPENAI_DEPLOYMENT_NAME === 'string' &&
9214
+ typeof env.AZUREOPENAI_API_KEY === 'string') {
9215
+ return {
9216
+ title: 'Azure Open AI (from env)',
9217
+ packageName: '@promptbook/azure-openai',
9218
+ className: 'AzureOpenAiExecutionTools',
9219
+ options: {
9220
+ resourceName: env.AZUREOPENAI_RESOURCE_NAME,
9221
+ deploymentName: env.AZUREOPENAI_DEPLOYMENT_NAME,
9222
+ apiKey: env.AZUREOPENAI_API_KEY,
9223
+ },
9224
+ };
9225
+ }
9226
+ else if (typeof env.AZUREOPENAI_RESOURCE_NAME === 'string' ||
9227
+ typeof env.AZUREOPENAI_DEPLOYMENT_NAME === 'string' ||
9228
+ typeof env.AZUREOPENAI_API_KEY === 'string') {
9229
+ throw new Error(spaceTrim("\n You must provide all of the following environment variables:\n \n - AZUREOPENAI_RESOURCE_NAME (".concat(typeof env.AZUREOPENAI_RESOURCE_NAME === 'string' ? 'defined' : 'not defined', ")\n - AZUREOPENAI_DEPLOYMENT_NAME (").concat(typeof env.AZUREOPENAI_DEPLOYMENT_NAME === 'string' ? 'defined' : 'not defined', ")\n - AZUREOPENAI_API_KEY (").concat(typeof env.AZUREOPENAI_API_KEY === 'string' ? 'defined' : 'not defined', ") \n ")));
9230
+ }
9231
+ return null;
9232
+ },
9233
+ });
9234
+
9039
9235
  /**
9040
9236
  * @@@ registration1 of default configuration for Open AI
9041
9237
  *
@@ -9045,9 +9241,9 @@ var _AnthropicClaudeMetadataRegistration = $llmToolsMetadataRegister.register({
9045
9241
  * @public exported from `@promptbook/cli`
9046
9242
  */
9047
9243
  var _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
9048
- title: 'Anthropic Claude',
9049
- packageName: '@promptbook/anthropic-claude',
9050
- className: 'AnthropicClaudeExecutionTools',
9244
+ title: 'Open AI',
9245
+ packageName: '@promptbook/openai',
9246
+ className: 'OpenAiExecutionTools',
9051
9247
  getBoilerplateConfiguration: function () {
9052
9248
  return {
9053
9249
  title: 'Open AI (boilerplate)',
@@ -9073,5 +9269,5 @@ var _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
9073
9269
  },
9074
9270
  });
9075
9271
 
9076
- export { PROMPTBOOK_VERSION, _AnthropicClaudeMetadataRegistration, _OpenAiMetadataRegistration, __CLI };
9272
+ export { PROMPTBOOK_VERSION, _AnthropicClaudeMetadataRegistration, _AzureOpenAiMetadataRegistration, _CLI, _OpenAiMetadataRegistration };
9077
9273
  //# sourceMappingURL=index.es.js.map