@promptbook/cli 0.66.0-0 → 0.66.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/esm/index.es.js +101 -37
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +12 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +4 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +6 -2
  7. package/esm/typings/src/config.d.ts +15 -0
  8. package/esm/typings/src/execution/AvailableModel.d.ts +20 -0
  9. package/esm/typings/src/execution/LlmExecutionTools.d.ts +12 -24
  10. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +10 -0
  11. package/esm/typings/src/llm-providers/_common/$llmToolsConfigurationBoilerplatesRegister.d.ts +12 -0
  12. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +10 -0
  13. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +2 -10
  14. package/esm/typings/src/llm-providers/_common/config.d.ts +5 -6
  15. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +18 -0
  18. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +4 -0
  19. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +7 -2
  20. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +8 -0
  21. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +11 -0
  22. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +4 -0
  23. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
  24. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  26. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +1 -1
  27. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
  28. package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.d.ts → computeOpenAiUsage.d.ts} +6 -2
  29. package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.test.d.ts +4 -0
  30. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +15 -0
  31. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  32. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +8 -0
  33. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +11 -0
  34. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  35. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +5 -2
  36. package/esm/typings/src/utils/Register.d.ts +22 -0
  37. package/esm/typings/src/utils/environment/getGlobalScope.d.ts +3 -0
  38. package/package.json +1 -1
  39. package/umd/index.umd.js +101 -37
  40. package/umd/index.umd.js.map +1 -1
  41. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +0 -1
package/esm/index.es.js CHANGED
@@ -20,7 +20,7 @@ import glob from 'glob-promise';
20
20
  /**
21
21
  * The version of the Promptbook library
22
22
  */
23
- var PROMPTBOOK_VERSION = '0.65.0';
23
+ var PROMPTBOOK_VERSION = '0.66.0-3';
24
24
  // TODO: !!!! List here all the versions and annotate + put into script
25
25
 
26
26
  /*! *****************************************************************************
@@ -303,6 +303,9 @@ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
303
303
  * @private within the repository
304
304
  */
305
305
  var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
306
+ /**
307
+ * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
308
+ */
306
309
 
307
310
  /**
308
311
  * Initializes `about` command for Promptbook CLI utilities
@@ -832,7 +835,7 @@ function forEachAsync(array, options, callbackfunction) {
832
835
  });
833
836
  }
834
837
 
835
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": 
\"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
838
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-3",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-3",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-3",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-3",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
836
839
 
837
840
  /**
838
841
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -6806,6 +6809,15 @@ var ANTHROPIC_CLAUDE_MODELS = [
6806
6809
  * TODO: [🎰] Some mechanism to auto-update available models
6807
6810
  */
6808
6811
 
6812
+ /**
6813
+ * Get current date in ISO 8601 format
6814
+ *
6815
+ * @private internal utility
6816
+ */
6817
+ function getCurrentIsoDate() {
6818
+ return new Date().toISOString();
6819
+ }
6820
+
6809
6821
  /**
6810
6822
  * Helper of usage compute
6811
6823
  *
@@ -6840,13 +6852,42 @@ function uncertainNumber(value) {
6840
6852
  }
6841
6853
 
6842
6854
  /**
6843
- * Get current date in ISO 8601 format
6855
+ * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
6844
6856
  *
6845
- * @private internal utility
6857
+ * @param promptContent The content of the prompt
6858
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
6859
+ * @param rawResponse The raw response from Anthropic Claude API
6860
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
6861
+ * @private internal utility of `AnthropicClaudeExecutionTools`
6846
6862
  */
6847
- function getCurrentIsoDate() {
6848
- return new Date().toISOString();
6863
+ function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
6864
+ resultContent, rawResponse) {
6865
+ var _a, _b;
6866
+ if (rawResponse.usage === undefined) {
6867
+ throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
6868
+ }
6869
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
6870
+ throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
6871
+ }
6872
+ var inputTokens = rawResponse.usage.input_tokens;
6873
+ var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
6874
+ var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
6875
+ var price;
6876
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
6877
+ price = uncertainNumber();
6878
+ }
6879
+ else {
6880
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
6881
+ }
6882
+ return {
6883
+ price: price,
6884
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
6885
+ output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
6886
+ };
6849
6887
  }
6888
+ /**
6889
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
6890
+ */
6850
6891
 
6851
6892
  /**
6852
6893
  * Execution Tools for calling Anthropic Claude API.
@@ -6867,9 +6908,8 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6867
6908
  var anthropicOptions = __assign({}, options);
6868
6909
  delete anthropicOptions.isVerbose;
6869
6910
  delete anthropicOptions.isProxied;
6870
- this.client = new Anthropic(
6871
- // <- TODO: [🧱] Implement in a functional (not new Class) way
6872
- anthropicOptions);
6911
+ this.client = new Anthropic(anthropicOptions);
6912
+ // <- TODO: !!!!!! Lazy-load client
6873
6913
  }
6874
6914
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
6875
6915
  get: function () {
@@ -6940,15 +6980,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6940
6980
  if (contentBlock.type !== 'text') {
6941
6981
  throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
6942
6982
  }
6943
- console.log('!!!!!! rawResponse.usage', rawResponse.usage);
6944
6983
  resultContent = contentBlock.text;
6945
6984
  // eslint-disable-next-line prefer-const
6946
6985
  complete = getCurrentIsoDate();
6947
- usage = {
6948
- price: { value: 0, isUncertain: true } /* <- TODO: [🐞] !!!!!! Compute usage */,
6949
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
6950
- output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
6951
- };
6986
+ usage = computeAnthropicClaudeUsage(content, '', rawResponse);
6952
6987
  return [2 /*return*/, {
6953
6988
  content: resultContent,
6954
6989
  modelName: rawResponse.model,
@@ -7074,7 +7109,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
7074
7109
  * TODO: [🍆] JSON mode
7075
7110
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
7076
7111
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
7077
- * TODO: Maybe make custom OpenaiError
7112
+ * TODO: Maybe make custom OpenAiError
7078
7113
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
7079
7114
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
7080
7115
  * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
@@ -7085,7 +7120,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
7085
7120
  *
7086
7121
  * @public exported from `@promptbook/anthropic-claude`
7087
7122
  */
7088
- function createAnthropicClaudeExecutionTools(options) {
7123
+ var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
7089
7124
  if (options.isProxied) {
7090
7125
  return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
7091
7126
  {
@@ -7096,14 +7131,17 @@ function createAnthropicClaudeExecutionTools(options) {
7096
7131
  },
7097
7132
  ], models: ANTHROPIC_CLAUDE_MODELS }));
7098
7133
  }
7099
- return new AnthropicClaudeExecutionTools(
7100
- // <- TODO: [🧱] Implement in a functional (not new Class) way
7101
- options);
7102
- }
7134
+ return new AnthropicClaudeExecutionTools(options);
7135
+ }, {
7136
+ packageName: '@promptbook/anthropic-claude',
7137
+ className: 'AnthropicClaudeExecutionTools',
7138
+ });
7103
7139
  /**
7104
7140
  * TODO: [🧠] !!!! Make anonymous this with all LLM providers
7105
- * TODO: [🧠] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
7141
+ * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
7106
7142
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
7143
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
7144
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
7107
7145
  */
7108
7146
 
7109
7147
  /**
@@ -7473,9 +7511,8 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7473
7511
  this.options = options;
7474
7512
  this.client = new OpenAIClient(
7475
7513
  // <- TODO: [🧱] Implement in a functional (not new Class) way
7476
- "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(
7477
- // <- TODO: [🧱] Implement in a functional (not new Class) way
7478
- options.apiKey));
7514
+ "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
7515
+ // <- TODO: !!!!!! Lazy-load client
7479
7516
  }
7480
7517
  Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
7481
7518
  get: function () {
@@ -7706,7 +7743,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7706
7743
  }());
7707
7744
  /**
7708
7745
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
7709
- * TODO: Maybe make custom AzureOpenaiError
7746
+ * TODO: Maybe make custom AzureOpenAiError
7710
7747
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
7711
7748
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
7712
7749
  */
@@ -7720,7 +7757,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7720
7757
  * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
7721
7758
  * @private internal utility of `OpenAiExecutionTools`
7722
7759
  */
7723
- function computeOpenaiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
7760
+ function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
7724
7761
  resultContent, rawResponse) {
7725
7762
  var _a, _b;
7726
7763
  if (rawResponse.usage === undefined) {
@@ -7745,9 +7782,12 @@ resultContent, rawResponse) {
7745
7782
  output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
7746
7783
  };
7747
7784
  }
7785
+ /**
7786
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
7787
+ */
7748
7788
 
7749
7789
  /**
7750
- * Execution Tools for calling OpenAI API.
7790
+ * Execution Tools for calling OpenAI API
7751
7791
  *
7752
7792
  * @public exported from `@promptbook/openai`
7753
7793
  */
@@ -7765,6 +7805,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
7765
7805
  delete openAiOptions.isVerbose;
7766
7806
  delete openAiOptions.user;
7767
7807
  this.client = new OpenAI(__assign({}, openAiOptions));
7808
+ // <- TODO: !!!!!! Lazy-load client
7768
7809
  }
7769
7810
  Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
7770
7811
  get: function () {
@@ -7845,7 +7886,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
7845
7886
  resultContent = rawResponse.choices[0].message.content;
7846
7887
  // eslint-disable-next-line prefer-const
7847
7888
  complete = getCurrentIsoDate();
7848
- usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
7889
+ usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
7849
7890
  if (resultContent === null) {
7850
7891
  throw new PipelineExecutionError('No response message from OpenAI');
7851
7892
  }
@@ -7914,7 +7955,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
7914
7955
  resultContent = rawResponse.choices[0].text;
7915
7956
  // eslint-disable-next-line prefer-const
7916
7957
  complete = getCurrentIsoDate();
7917
- usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
7958
+ usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
7918
7959
  return [2 /*return*/, {
7919
7960
  content: resultContent,
7920
7961
  modelName: rawResponse.model || modelName,
@@ -7971,7 +8012,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
7971
8012
  resultContent = rawResponse.data[0].embedding;
7972
8013
  // eslint-disable-next-line prefer-const
7973
8014
  complete = getCurrentIsoDate();
7974
- usage = computeOpenaiUsage(content, '', rawResponse);
8015
+ usage = computeOpenAiUsage(content, '', rawResponse);
7975
8016
  return [2 /*return*/, {
7976
8017
  content: resultContent,
7977
8018
  modelName: rawResponse.model || modelName,
@@ -8045,18 +8086,37 @@ var OpenAiExecutionTools = /** @class */ (function () {
8045
8086
  /**
8046
8087
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
8047
8088
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
8048
- * TODO: Maybe make custom OpenaiError
8089
+ * TODO: Maybe make custom OpenAiError
8049
8090
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
8050
8091
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
8051
8092
  */
8052
8093
 
8053
8094
  /**
8095
+ * Execution Tools for calling OpenAI API
8096
+ *
8097
+ * @public exported from `@promptbook/openai`
8098
+ */
8099
+ var createOpenAiExecutionTools = Object.assign(function (options) {
8100
+ // TODO: !!!!!! If browser, auto add `dangerouslyAllowBrowser`
8101
+ return new OpenAiExecutionTools(options);
8102
+ }, {
8103
+ packageName: '@promptbook/openai',
8104
+ className: 'OpenAiExecutionTools',
8105
+ });
8106
+ /**
8107
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
8108
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
8109
+ */
8110
+
8111
+ /**
8112
+ * @@@
8113
+ *
8114
+ * TODO: !!!!!! Not centralized - register each provider to each package
8115
+ *
8054
8116
  * @private internal type for `createLlmToolsFromConfiguration`
8055
8117
  */
8056
8118
  var EXECUTION_TOOLS_CLASSES = {
8057
- createOpenAiExecutionTools: function (options) {
8058
- return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
8059
- },
8119
+ createOpenAiExecutionTools: createOpenAiExecutionTools,
8060
8120
  createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
8061
8121
  createAzureOpenAiExecutionTools: function (options) {
8062
8122
  return new AzureOpenAiExecutionTools(
@@ -8066,7 +8126,7 @@ var EXECUTION_TOOLS_CLASSES = {
8066
8126
  // <- Note: [🦑] Add here new LLM provider
8067
8127
  };
8068
8128
  /**
8069
- * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
8129
+ * TODO: !!!!!!! Make global register for this
8070
8130
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
8071
8131
  */
8072
8132
 
@@ -8082,7 +8142,11 @@ function createLlmToolsFromConfiguration(configuration, options) {
8082
8142
  if (options === void 0) { options = {}; }
8083
8143
  var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
8084
8144
  var llmTools = configuration.map(function (llmConfiguration) {
8085
- return EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
8145
+ var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
8146
+ if (!constructor) {
8147
+ throw new Error(spaceTrim(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
8148
+ }
8149
+ return constructor(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
8086
8150
  });
8087
8151
  return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
8088
8152
  }