@promptbook/node 0.66.0-0 → 0.66.0-4

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (41)
  1. package/esm/index.es.js +101 -37
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +4 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +12 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +4 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +6 -2
  7. package/esm/typings/src/config.d.ts +15 -0
  8. package/esm/typings/src/execution/AvailableModel.d.ts +20 -0
  9. package/esm/typings/src/execution/LlmExecutionTools.d.ts +12 -24
  10. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +10 -0
  11. package/esm/typings/src/llm-providers/_common/$llmToolsConfigurationBoilerplatesRegister.d.ts +12 -0
  12. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +10 -0
  13. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +2 -10
  14. package/esm/typings/src/llm-providers/_common/config.d.ts +5 -6
  15. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
  16. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  17. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +18 -0
  18. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +4 -0
  19. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +7 -2
  20. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +8 -0
  21. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +11 -0
  22. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +4 -0
  23. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
  24. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  26. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +1 -1
  27. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
  28. package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.d.ts → computeOpenAiUsage.d.ts} +6 -2
  29. package/esm/typings/src/llm-providers/openai/computeOpenAiUsage.test.d.ts +4 -0
  30. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +15 -0
  31. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  32. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +8 -0
  33. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +11 -0
  34. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  35. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +5 -2
  36. package/esm/typings/src/utils/Register.d.ts +22 -0
  37. package/esm/typings/src/utils/environment/getGlobalScope.d.ts +3 -0
  38. package/package.json +2 -2
  39. package/umd/index.umd.js +101 -37
  40. package/umd/index.umd.js.map +1 -1
  41. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +0 -1
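Taken together, this pre-release bumps the embedded `PROMPTBOOK_VERSION` to `0.66.0-3`, replaces the placeholder Anthropic Claude usage (and a stray debug `console.log`) with a real `computeAnthropicClaudeUsage` helper, renames `computeOpenaiUsage` to `computeOpenAiUsage`, converts `createAnthropicClaudeExecutionTools` into a factory carrying `packageName`/`className` metadata and adds a matching `createOpenAiExecutionTools`, makes `createLlmToolsFromConfiguration` fail with a readable error for unknown providers, and lays groundwork for a decentralized provider register (`Register`, `$llmToolsRegister`, and per-provider `register-configuration`/`register-constructor` modules).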
package/esm/index.es.js CHANGED
@@ -17,7 +17,7 @@ import OpenAI from 'openai';
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.65.0';
+ var PROMPTBOOK_VERSION = '0.66.0-3';
  // TODO: !!!! List here all the versions and annotate + put into script

  /*! *****************************************************************************
@@ -249,6 +249,9 @@ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
  * @private within the repository
  */
  var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
+ /**
+ * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
+ */

  /**
  * Prettify the html code
@@ -693,7 +696,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-3",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-3",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-3",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-3",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the 
persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -6570,6 +6573,15 @@ var ANTHROPIC_CLAUDE_MODELS = [
  * TODO: [🎰] Some mechanism to auto-update available models
  */

+ /**
+ * Get current date in ISO 8601 format
+ *
+ * @private internal utility
+ */
+ function getCurrentIsoDate() {
+ return new Date().toISOString();
+ }
+
  /**
  * Helper of usage compute
  *
@@ -6604,13 +6616,42 @@ function uncertainNumber(value) {
  }

  /**
- * Get current date in ISO 8601 format
+ * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
  *
- * @private internal utility
+ * @param promptContent The content of the prompt
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
+ * @param rawResponse The raw response from Anthropic Claude API
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
+ * @private internal utility of `AnthropicClaudeExecutionTools`
  */
- function getCurrentIsoDate() {
- return new Date().toISOString();
+ function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
+ resultContent, rawResponse) {
+ var _a, _b;
+ if (rawResponse.usage === undefined) {
+ throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
+ }
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
+ throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
+ }
+ var inputTokens = rawResponse.usage.input_tokens;
+ var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
+ var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
+ var price;
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
+ price = uncertainNumber();
+ }
+ else {
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
+ }
+ return {
+ price: price,
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
+ output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
+ };
  }
+ /**
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
+ */

  /**
  * Execution Tools for calling Anthropic Claude API.
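The new helper's pricing logic is straightforward: multiply the input and output token counts by the model's per-token rates from `ANTHROPIC_CLAUDE_MODELS`, and fall back to an explicitly uncertain zero when the model or its pricing is unknown. A minimal TypeScript sketch of that logic follows; the type shapes (`UncertainNumber`, `ModelInfo`) are assumptions inferred from this diff, not the package's exact declarations.

```ts
// Minimal sketch of the pricing logic inside computeAnthropicClaudeUsage.
// The type shapes are assumptions inferred from this diff; rates are
// treated as price per token.
interface UncertainNumber {
    value: number;
    isUncertain?: true;
}

interface ModelInfo {
    modelName: string;
    pricing?: { prompt: number; output: number }; // <- price per input/output token
}

function uncertainNumber(value?: number): UncertainNumber {
    // No value (unknown model or missing pricing) -> zero, flagged as uncertain
    return value === undefined ? { value: 0, isUncertain: true } : { value };
}

function computePrice(
    models: ReadonlyArray<ModelInfo>,
    modelName: string,
    inputTokens: number,
    outputTokens: number,
): UncertainNumber {
    const modelInfo = models.find((model) => model.modelName === modelName);
    if (modelInfo?.pricing === undefined) {
        return uncertainNumber();
    }
    return uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
}
```

This also fixes the earlier placeholder in `callChatModel` (visible in a later hunk), which hard-coded `price: { value: 0, isUncertain: true }` and counted `prompt.content` for both the input and the output side.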
@@ -6631,9 +6672,8 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  var anthropicOptions = __assign({}, options);
  delete anthropicOptions.isVerbose;
  delete anthropicOptions.isProxied;
- this.client = new Anthropic(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- anthropicOptions);
+ this.client = new Anthropic(anthropicOptions);
+ // <- TODO: !!!!!! Lazy-load client
  }
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
  get: function () {
@@ -6704,15 +6744,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  if (contentBlock.type !== 'text') {
  throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
  }
- console.log('!!!!!! rawResponse.usage', rawResponse.usage);
  resultContent = contentBlock.text;
  // eslint-disable-next-line prefer-const
  complete = getCurrentIsoDate();
- usage = {
- price: { value: 0, isUncertain: true } /* <- TODO: [🐞] !!!!!! Compute usage */,
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
- output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
- };
+ usage = computeAnthropicClaudeUsage(content, '', rawResponse);
  return [2 /*return*/, {
  content: resultContent,
  modelName: rawResponse.model,
@@ -6838,7 +6873,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  * TODO: [🍆] JSON mode
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenaiError
+ * TODO: Maybe make custom OpenAiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
@@ -6849,7 +6884,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  *
  * @public exported from `@promptbook/anthropic-claude`
  */
- function createAnthropicClaudeExecutionTools(options) {
+ var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
  if (options.isProxied) {
  return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
  {
@@ -6860,14 +6895,17 @@ function createAnthropicClaudeExecutionTools(options) {
  },
  ], models: ANTHROPIC_CLAUDE_MODELS }));
  }
- return new AnthropicClaudeExecutionTools(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- options);
- }
+ return new AnthropicClaudeExecutionTools(options);
+ }, {
+ packageName: '@promptbook/anthropic-claude',
+ className: 'AnthropicClaudeExecutionTools',
+ });
  /**
  * TODO: [🧠] !!!! Make anonymous this with all LLM providers
- * TODO: [🧠] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
+ * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
  */

  /**
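The pattern introduced here, `Object.assign(function, { packageName, className })`, produces a factory that is still a plain callable but carries the metadata a register needs. The sketch below shows the shape under stated assumptions: `LlmExecutionToolsConstructor` appears as a new typings file in this diff, but its exact declaration is not shown, so these types and the example provider are illustrative only.

```ts
// Sketch of the Object.assign(factory, metadata) pattern used above.
// Types are assumptions; the example provider is purely hypothetical.
interface LlmExecutionTools {
    title: string;
}

type LlmExecutionToolsConstructor<TOptions> = ((options: TOptions) => LlmExecutionTools) & {
    packageName: string;
    className: string;
};

const createExampleExecutionTools: LlmExecutionToolsConstructor<{ apiKey: string }> = Object.assign(
    (options: { apiKey: string }): LlmExecutionTools => {
        void options.apiKey; // <- a real factory would construct an API client here
        return { title: 'Example' };
    },
    {
        packageName: '@promptbook/example', // <- hypothetical package name
        className: 'ExampleExecutionTools', // <- hypothetical class name
    },
);

// The metadata rides along on the callable, so a register can report
// which package provides which constructor:
console.log(`${createExampleExecutionTools.className} from ${createExampleExecutionTools.packageName}`);
```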
@@ -7237,9 +7275,8 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  this.options = options;
  this.client = new OpenAIClient(
  // <- TODO: [🧱] Implement in a functional (not new Class) way
- "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(
- // <- TODO: [🧱] Implement in a functional (not new Class) way
- options.apiKey));
+ "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
+ // <- TODO: !!!!!! Lazy-load client
  }
  Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
  get: function () {
@@ -7470,7 +7507,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  }());
  /**
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom AzureOpenaiError
+ * TODO: Maybe make custom AzureOpenAiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */
@@ -7484,7 +7521,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
  * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
  * @private internal utility of `OpenAiExecutionTools`
  */
- function computeOpenaiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
+ function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
  resultContent, rawResponse) {
  var _a, _b;
  if (rawResponse.usage === undefined) {
@@ -7509,9 +7546,12 @@ resultContent, rawResponse) {
  output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
  };
  }
+ /**
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
+ */

  /**
- * Execution Tools for calling OpenAI API.
+ * Execution Tools for calling OpenAI API
  *
  * @public exported from `@promptbook/openai`
  */
@@ -7529,6 +7569,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  delete openAiOptions.isVerbose;
  delete openAiOptions.user;
  this.client = new OpenAI(__assign({}, openAiOptions));
+ // <- TODO: !!!!!! Lazy-load client
  }
  Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
  get: function () {
@@ -7609,7 +7650,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  resultContent = rawResponse.choices[0].message.content;
  // eslint-disable-next-line prefer-const
  complete = getCurrentIsoDate();
- usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
+ usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
  if (resultContent === null) {
  throw new PipelineExecutionError('No response message from OpenAI');
  }
@@ -7678,7 +7719,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  resultContent = rawResponse.choices[0].text;
  // eslint-disable-next-line prefer-const
  complete = getCurrentIsoDate();
- usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
+ usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
  return [2 /*return*/, {
  content: resultContent,
  modelName: rawResponse.model || modelName,
@@ -7735,7 +7776,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
  resultContent = rawResponse.data[0].embedding;
  // eslint-disable-next-line prefer-const
  complete = getCurrentIsoDate();
- usage = computeOpenaiUsage(content, '', rawResponse);
+ usage = computeOpenAiUsage(content, '', rawResponse);
  return [2 /*return*/, {
  content: resultContent,
  modelName: rawResponse.model || modelName,
@@ -7809,18 +7850,37 @@ var OpenAiExecutionTools = /** @class */ (function () {
  /**
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
- * TODO: Maybe make custom OpenaiError
+ * TODO: Maybe make custom OpenAiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
  */

  /**
+ * Execution Tools for calling OpenAI API
+ *
+ * @public exported from `@promptbook/openai`
+ */
+ var createOpenAiExecutionTools = Object.assign(function (options) {
+ // TODO: !!!!!! If browser, auto add `dangerouslyAllowBrowser`
+ return new OpenAiExecutionTools(options);
+ }, {
+ packageName: '@promptbook/openai',
+ className: 'OpenAiExecutionTools',
+ });
+ /**
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
+ */
+
+ /**
+ * @@@
+ *
+ * TODO: !!!!!! Not centralized - register each provider to each package
+ *
  * @private internal type for `createLlmToolsFromConfiguration`
  */
  var EXECUTION_TOOLS_CLASSES = {
- createOpenAiExecutionTools: function (options) {
- return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
- },
+ createOpenAiExecutionTools: createOpenAiExecutionTools,
  createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
  createAzureOpenAiExecutionTools: function (options) {
  return new AzureOpenAiExecutionTools(
@@ -7830,7 +7890,7 @@ var EXECUTION_TOOLS_CLASSES = {
  // <- Note: [🦑] Add here new LLM provider
  };
  /**
- * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
+ * TODO: !!!!!!! Make global register for this
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
  */

@@ -7846,7 +7906,11 @@ function createLlmToolsFromConfiguration(configuration, options) {
  if (options === void 0) { options = {}; }
  var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
  var llmTools = configuration.map(function (llmConfiguration) {
- return EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
+ var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
+ if (!constructor) {
+ throw new Error(spaceTrim(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
+ }
+ return constructor(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
  });
  return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
  }
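The guard added in this last hunk replaces a bare dynamic call with a lookup that fails loudly when a provider's constructor is missing. A minimal sketch of the same dispatch, with simplified types (the real code formats its message with `spaceTrim` and joins the constructed tools via `joinLlmExecutionTools`; the factory here is a hypothetical stand-in for `EXECUTION_TOOLS_CLASSES`):

```ts
// Sketch of the lookup-before-call guard in createLlmToolsFromConfiguration.
type ToolsFactory = (options: Record<string, unknown>) => unknown;

const FACTORIES: Record<string, ToolsFactory | undefined> = {
    createExampleExecutionTools: (options) => ({ provider: 'example', options }), // <- hypothetical
};

function createFromConfiguration(
    configuration: ReadonlyArray<{ className: string; options: Record<string, unknown> }>,
    isVerbose = false,
): Array<unknown> {
    return configuration.map(({ className, options }) => {
        const factory = FACTORIES[`create${className}`];
        if (!factory) {
            // Fail with a readable message instead of "... is not a function"
            throw new Error(
                `There is no constructor for LLM provider \`${className}\`\n` +
                    `Available constructors are:\n${Object.keys(FACTORIES).join('\n')}`,
            );
        }
        return factory({ isVerbose, ...options });
    });
}
```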