@promptbook/core 0.66.0-1 → 0.66.0-5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. package/esm/index.es.js +167 -61
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +8 -2
  5. package/esm/typings/src/_packages/openai.index.d.ts +4 -0
  6. package/esm/typings/src/_packages/types.index.d.ts +10 -2
  7. package/esm/typings/src/cli/cli-commands/make.d.ts +1 -1
  8. package/esm/typings/src/config.d.ts +0 -7
  9. package/esm/typings/src/execution/AvailableModel.d.ts +20 -0
  10. package/esm/typings/src/execution/LlmExecutionTools.d.ts +1 -19
  11. package/esm/typings/src/execution/LlmExecutionToolsConstructor.d.ts +10 -0
  12. package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.test.d.ts +1 -1
  13. package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.test.d.ts +1 -1
  14. package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.test.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/_common/$llmToolsMetadataRegister.d.ts +10 -0
  16. package/esm/typings/src/llm-providers/_common/$llmToolsRegister.d.ts +10 -0
  17. package/esm/typings/src/llm-providers/_common/LlmToolsConfiguration.d.ts +7 -13
  18. package/esm/typings/src/llm-providers/_common/LlmToolsMetadata.d.ts +27 -0
  19. package/esm/typings/src/llm-providers/_common/LlmToolsOptions.d.ts +7 -0
  20. package/esm/typings/src/llm-providers/_common/config.d.ts +4 -0
  21. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +2 -2
  22. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +1 -1
  23. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +1 -1
  24. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools.d.ts +7 -2
  26. package/esm/typings/src/llm-providers/anthropic-claude/register-configuration.d.ts +8 -0
  27. package/esm/typings/src/llm-providers/anthropic-claude/register-constructor.d.ts +11 -0
  28. package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
  29. package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +1 -1
  30. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +1 -1
  31. package/esm/typings/src/llm-providers/multiple/MultipleLlmExecutionTools.d.ts +1 -1
  32. package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
  33. package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.d.ts → computeOpenAiUsage.d.ts} +2 -2
  34. package/esm/typings/src/llm-providers/openai/{computeOpenaiUsage.test.d.ts → computeOpenAiUsage.test.d.ts} +1 -1
  35. package/esm/typings/src/llm-providers/openai/createOpenAiExecutionTools.d.ts +15 -0
  36. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +1 -1
  37. package/esm/typings/src/llm-providers/openai/register-configuration.d.ts +8 -0
  38. package/esm/typings/src/llm-providers/openai/register-constructor.d.ts +11 -0
  39. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
  40. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +2 -2
  41. package/esm/typings/src/personas/preparePersona.test.d.ts +1 -1
  42. package/esm/typings/src/utils/Register.d.ts +22 -0
  43. package/package.json +1 -1
  44. package/umd/index.umd.js +170 -61
  45. package/umd/index.umd.js.map +1 -1
package/esm/index.es.js CHANGED
@@ -14,7 +14,7 @@ import moment from 'moment';
14
14
  /**
15
15
  * The version of the Promptbook library
16
16
  */
17
- var PROMPTBOOK_VERSION = '0.66.0-0';
17
+ var PROMPTBOOK_VERSION = '0.66.0-4';
18
18
  // TODO: !!!! List here all the versions and annotate + put into script
19
19
 
20
20
  /*! *****************************************************************************
@@ -595,44 +595,6 @@ var DEFAULT_REMOTE_URL = 'https://api.pavolhejny.com/';
595
595
  * @public exported from `@promptbook/core`
596
596
  */
597
597
  var DEFAULT_REMOTE_URL_PATH = '/promptbook/socket.io';
598
- // <- TODO: [🧜‍♂️]
599
- /**
600
- * @@@
601
- *
602
- * @public exported from `@promptbook/core`
603
- */
604
- var BOILERPLATE_LLM_TOOLS_CONFIGURATION_ = [
605
- {
606
- title: 'Open AI',
607
- packageName: '@promptbook/openai',
608
- className: 'OpenAiExecutionTools',
609
- options: {
610
- apiKey: 'sk-',
611
- },
612
- },
613
- {
614
- title: 'Anthropic Claude',
615
- packageName: '@promptbook/anthropic-claude',
616
- className: 'AnthropicClaudeExecutionTools',
617
- options: {
618
- apiKey: 'sk-ant-api03-',
619
- isProxied: true,
620
- remoteUrl: DEFAULT_REMOTE_URL,
621
- path: DEFAULT_REMOTE_URL_PATH,
622
- },
623
- },
624
- {
625
- title: 'Azure Open AI',
626
- packageName: '@promptbook/azure-openai',
627
- className: 'AzureOpenAiExecutionTools',
628
- options: {
629
- // TODO: !!!> resourceName
630
- // TODO: !!!> deploymentName
631
- apiKey: 'sk-',
632
- },
633
- },
634
- // <- Note: [🦑] Add here new LLM provider
635
- ];
636
598
  /**
637
599
  * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
638
600
  */
@@ -1683,7 +1645,7 @@ function forEachAsync(array, options, callbackfunction) {
1683
1645
  });
1684
1646
  }
1685
1647
 
1686
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
1648
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-4",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-4",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-4",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-4",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
1687
1649
 
1688
1650
  var defaultDiacriticsRemovalMap = [
1689
1651
  {
@@ -6535,6 +6497,49 @@ var CallbackInterfaceTools = /** @class */ (function () {
6535
6497
  return CallbackInterfaceTools;
6536
6498
  }());
6537
6499
 
6500
+ /**
6501
+ * Register is @@@
6502
+ *
6503
+ * @private internal utility, exported are only signleton instances of this class
6504
+ */
6505
+ var Register = /** @class */ (function () {
6506
+ function Register(storage) {
6507
+ this.storage = storage;
6508
+ }
6509
+ Register.prototype.list = function () {
6510
+ // <- TODO: ReadonlyDeep<Array<TRegistered>>
6511
+ return this.storage;
6512
+ };
6513
+ Register.prototype.register = function (registered) {
6514
+ // !!!!!! <- TODO: What to return here
6515
+ // TODO: !!!!!! Compare if same is not already registered
6516
+ this.storage.push(registered);
6517
+ };
6518
+ return Register;
6519
+ }());
6520
+
6521
+ /**
6522
+ * @@@
6523
+ *
6524
+ * Note: `$` is used to indicate that this interacts with the global scope
6525
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
6526
+ * @public exported from `@promptbook/core`
6527
+ */
6528
+ var $llmToolsMetadataRegister = new Register([
6529
+ // TODO: !!!!!! Take from global scope
6530
+ ]);
6531
+
6532
+ /**
6533
+ * @@@
6534
+ *
6535
+ * Note: `$` is used to indicate that this interacts with the global scope
6536
+ * @singleton Only one instance of each register is created per build, but thare can be more @@@
6537
+ * @public exported from `@promptbook/core`
6538
+ */
6539
+ var $llmToolsRegister = new Register([
6540
+ // TODO: !!!!!! Take from global scope
6541
+ ]);
6542
+
6538
6543
  /**
6539
6544
  * Remote server is a proxy server that uses its execution tools internally and exposes the executor interface externally.
6540
6545
  *
@@ -6848,7 +6853,7 @@ resultContent, rawResponse) {
6848
6853
  };
6849
6854
  }
6850
6855
  /**
6851
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
6856
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
6852
6857
  */
6853
6858
 
6854
6859
  /**
@@ -7071,7 +7076,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
7071
7076
  * TODO: [🍆] JSON mode
7072
7077
  * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
7073
7078
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
7074
- * TODO: Maybe make custom OpenaiError
7079
+ * TODO: Maybe make custom OpenAiError
7075
7080
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
7076
7081
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
7077
7082
  * TODO: [📅] Maybe instead of `RemoteLlmExecutionToolsOptions` use `proxyWithAnonymousRemoteServer` (if implemented)
@@ -7082,7 +7087,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
7082
7087
  *
7083
7088
  * @public exported from `@promptbook/anthropic-claude`
7084
7089
  */
7085
- function createAnthropicClaudeExecutionTools(options) {
7090
+ var createAnthropicClaudeExecutionTools = Object.assign(function (options) {
7086
7091
  if (options.isProxied) {
7087
7092
  return new RemoteLlmExecutionTools(__assign(__assign({}, options), { isAnonymous: true, llmToolsConfiguration: [
7088
7093
  {
@@ -7093,14 +7098,17 @@ function createAnthropicClaudeExecutionTools(options) {
7093
7098
  },
7094
7099
  ], models: ANTHROPIC_CLAUDE_MODELS }));
7095
7100
  }
7096
- return new AnthropicClaudeExecutionTools(
7097
- // <- TODO: [🧱] Implement in a functional (not new Class) way
7098
- options);
7099
- }
7101
+ return new AnthropicClaudeExecutionTools(options);
7102
+ }, {
7103
+ packageName: '@promptbook/anthropic-claude',
7104
+ className: 'AnthropicClaudeExecutionTools',
7105
+ });
7100
7106
  /**
7101
7107
  * TODO: [🧠] !!!! Make anonymous this with all LLM providers
7102
- * TODO: [🧠] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
7108
+ * TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
7103
7109
  * TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`
7110
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
7111
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
7104
7112
  */
7105
7113
 
7106
7114
  /**
@@ -7702,7 +7710,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7702
7710
  }());
7703
7711
  /**
7704
7712
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
7705
- * TODO: Maybe make custom AzureOpenaiError
7713
+ * TODO: Maybe make custom AzureOpenAiError
7706
7714
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
7707
7715
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
7708
7716
  */
@@ -7716,7 +7724,7 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7716
7724
  * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
7717
7725
  * @private internal utility of `OpenAiExecutionTools`
7718
7726
  */
7719
- function computeOpenaiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
7727
+ function computeOpenAiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
7720
7728
  resultContent, rawResponse) {
7721
7729
  var _a, _b;
7722
7730
  if (rawResponse.usage === undefined) {
@@ -7742,11 +7750,11 @@ resultContent, rawResponse) {
7742
7750
  };
7743
7751
  }
7744
7752
  /**
7745
- * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
7753
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
7746
7754
  */
7747
7755
 
7748
7756
  /**
7749
- * Execution Tools for calling OpenAI API.
7757
+ * Execution Tools for calling OpenAI API
7750
7758
  *
7751
7759
  * @public exported from `@promptbook/openai`
7752
7760
  */
@@ -7845,7 +7853,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
7845
7853
  resultContent = rawResponse.choices[0].message.content;
7846
7854
  // eslint-disable-next-line prefer-const
7847
7855
  complete = getCurrentIsoDate();
7848
- usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
7856
+ usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
7849
7857
  if (resultContent === null) {
7850
7858
  throw new PipelineExecutionError('No response message from OpenAI');
7851
7859
  }
@@ -7914,7 +7922,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
7914
7922
  resultContent = rawResponse.choices[0].text;
7915
7923
  // eslint-disable-next-line prefer-const
7916
7924
  complete = getCurrentIsoDate();
7917
- usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
7925
+ usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
7918
7926
  return [2 /*return*/, {
7919
7927
  content: resultContent,
7920
7928
  modelName: rawResponse.model || modelName,
@@ -7971,7 +7979,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
7971
7979
  resultContent = rawResponse.data[0].embedding;
7972
7980
  // eslint-disable-next-line prefer-const
7973
7981
  complete = getCurrentIsoDate();
7974
- usage = computeOpenaiUsage(content, '', rawResponse);
7982
+ usage = computeOpenAiUsage(content, '', rawResponse);
7975
7983
  return [2 /*return*/, {
7976
7984
  content: resultContent,
7977
7985
  modelName: rawResponse.model || modelName,
@@ -8045,18 +8053,37 @@ var OpenAiExecutionTools = /** @class */ (function () {
8045
8053
  /**
8046
8054
  * TODO: [🧠][🧙‍♂️] Maybe there can be some wizzard for thoose who want to use just OpenAI
8047
8055
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
8048
- * TODO: Maybe make custom OpenaiError
8056
+ * TODO: Maybe make custom OpenAiError
8049
8057
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
8050
8058
  * TODO: [🧠][🌰] Allow to pass `title` for tracking purposes
8051
8059
  */
8052
8060
 
8053
8061
  /**
8062
+ * Execution Tools for calling OpenAI API
8063
+ *
8064
+ * @public exported from `@promptbook/openai`
8065
+ */
8066
+ var createOpenAiExecutionTools = Object.assign(function (options) {
8067
+ // TODO: !!!!!! If browser, auto add `dangerouslyAllowBrowser`
8068
+ return new OpenAiExecutionTools(options);
8069
+ }, {
8070
+ packageName: '@promptbook/openai',
8071
+ className: 'OpenAiExecutionTools',
8072
+ });
8073
+ /**
8074
+ * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
8075
+ * TODO: [🎶] Naming "constructor" vs "creator" vs "factory"
8076
+ */
8077
+
8078
+ /**
8079
+ * @@@
8080
+ *
8081
+ * TODO: !!!!!! Remove EXECUTION_TOOLS_CLASSES and use $llmToolsRegister instead
8082
+ *
8054
8083
  * @private internal type for `createLlmToolsFromConfiguration`
8055
8084
  */
8056
8085
  var EXECUTION_TOOLS_CLASSES = {
8057
- createOpenAiExecutionTools: function (options) {
8058
- return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
8059
- },
8086
+ createOpenAiExecutionTools: createOpenAiExecutionTools,
8060
8087
  createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
8061
8088
  createAzureOpenAiExecutionTools: function (options) {
8062
8089
  return new AzureOpenAiExecutionTools(
@@ -8082,7 +8109,11 @@ function createLlmToolsFromConfiguration(configuration, options) {
8082
8109
  if (options === void 0) { options = {}; }
8083
8110
  var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
8084
8111
  var llmTools = configuration.map(function (llmConfiguration) {
8085
- return EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)](__assign({ isVerbose: isVerbose }, llmConfiguration.options));
8112
+ var constructor = EXECUTION_TOOLS_CLASSES["create".concat(llmConfiguration.className)];
8113
+ if (!constructor) {
8114
+ throw new Error(spaceTrim(function (block) { return "\n There is no constructor for LLM provider `".concat(llmConfiguration.className, "`\n\n\n @@@\n\n Available constructors are:\n ").concat(block('@@@'), "\n\n\n "); }));
8115
+ }
8116
+ return constructor(__assign({ isVerbose: isVerbose }, llmConfiguration.options));
8086
8117
  });
8087
8118
  return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
8088
8119
  }
@@ -8324,6 +8355,81 @@ function limitTotalUsage(llmTools, options) {
8324
8355
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
8325
8356
  */
8326
8357
 
8358
+ /**
8359
+ * @@@ registration1 of default configuration for Anthropic Claude
8360
+ *
8361
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
8362
+ *
8363
+ * @public exported from `@promptbook/core`
8364
+ */
8365
+ var _AnthropicClaudeMetadataRegistration = $llmToolsMetadataRegister.register({
8366
+ title: 'Anthropic Claude',
8367
+ packageName: '@promptbook/anthropic-claude',
8368
+ className: 'AnthropicClaudeExecutionTools',
8369
+ getBoilerplateConfiguration: function () {
8370
+ return {
8371
+ title: 'Anthropic Claude (boilerplate)',
8372
+ packageName: '@promptbook/anthropic-claude',
8373
+ className: 'AnthropicClaudeExecutionTools',
8374
+ options: {
8375
+ apiKey: 'sk-ant-api03-',
8376
+ isProxied: true,
8377
+ remoteUrl: DEFAULT_REMOTE_URL,
8378
+ path: DEFAULT_REMOTE_URL_PATH,
8379
+ },
8380
+ };
8381
+ },
8382
+ createConfigurationFromEnv: function (env) {
8383
+ if (typeof env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
8384
+ return {
8385
+ title: 'Claude (from env)',
8386
+ packageName: '@promptbook/antrhopic-claude',
8387
+ className: 'AnthropicClaudeExecutionTools',
8388
+ options: {
8389
+ apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
8390
+ },
8391
+ };
8392
+ }
8393
+ return null;
8394
+ },
8395
+ });
8396
+
8397
+ /**
8398
+ * @@@ registration1 of default configuration for Open AI
8399
+ *
8400
+ * Note: [🏐] Configurations registrations are done in @@@ BUT constructor @@@
8401
+ *
8402
+ * @public exported from `@promptbook/core`
8403
+ */
8404
+ var _OpenAiMetadataRegistration = $llmToolsMetadataRegister.register({
8405
+ title: 'Anthropic Claude',
8406
+ packageName: '@promptbook/anthropic-claude',
8407
+ className: 'AnthropicClaudeExecutionTools',
8408
+ getBoilerplateConfiguration: function () {
8409
+ return {
8410
+ title: 'Open AI (boilerplate)',
8411
+ packageName: '@promptbook/openai',
8412
+ className: 'OpenAiExecutionTools',
8413
+ options: {
8414
+ apiKey: 'sk-',
8415
+ },
8416
+ };
8417
+ },
8418
+ createConfigurationFromEnv: function (env) {
8419
+ if (typeof env.OPENAI_API_KEY === 'string') {
8420
+ return {
8421
+ title: 'Open AI (from env)',
8422
+ packageName: '@promptbook/openai',
8423
+ className: 'OpenAiExecutionTools',
8424
+ options: {
8425
+ apiKey: process.env.OPENAI_API_KEY,
8426
+ },
8427
+ };
8428
+ }
8429
+ return null;
8430
+ },
8431
+ });
8432
+
8327
8433
  /**
8328
8434
  * This class behaves like LocalStorage but separates keys by prefix
8329
8435
  *
@@ -8674,5 +8780,5 @@ function executionReportJsonToString(executionReportJson, options) {
8674
8780
  * TODO: [🧠] Should be in generated file GENERATOR_WARNING
8675
8781
  */
8676
8782
 
8677
- export { BOILERPLATE_LLM_TOOLS_CONFIGURATION_, BlockTypes, CLAIM, CallbackInterfaceTools, CollectionError, DEFAULT_REMOTE_URL, DEFAULT_REMOTE_URL_PATH, EXECUTIONS_CACHE_DIRNAME, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, LimitReachedError, MAX_EXECUTION_ATTEMPTS, MAX_FILENAME_LENGTH, MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, MAX_PARALLEL_COUNT, MODEL_VARIANTS, MemoryStorage, NotFoundError, NotYetImplementedError, PIPELINE_COLLECTION_BASE_FILENAME, PROMPTBOOK_VERSION, ParsingError, PipelineExecutionError, PipelineLogicError, PrefixStorage, RESERVED_PARAMETER_NAMES, ReferenceError$1 as ReferenceError, UnexpectedError, VersionMismatchError, ZERO_USAGE, addUsage, assertsExecutionSuccessful, cacheLlmTools, collectionToJson, countTotalUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, isPassingExpectations, isPipelinePrepared, joinLlmExecutionTools, limitTotalUsage, pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, prepareKnowledgeFromMarkdown, prepareKnowledgePieces, preparePersona, preparePipeline, prepareTemplates, prettifyPipelineString, stringifyPipelineJson, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline };
8783
+ export { $llmToolsMetadataRegister, $llmToolsRegister, BlockTypes, CLAIM, CallbackInterfaceTools, CollectionError, DEFAULT_REMOTE_URL, DEFAULT_REMOTE_URL_PATH, EXECUTIONS_CACHE_DIRNAME, EXPECTATION_UNITS, EnvironmentMismatchError, ExecutionReportStringOptionsDefaults, LimitReachedError, MAX_EXECUTION_ATTEMPTS, MAX_FILENAME_LENGTH, MAX_KNOWLEDGE_SOURCES_SCRAPING_DEPTH, MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL, MAX_PARALLEL_COUNT, MODEL_VARIANTS, MemoryStorage, NotFoundError, NotYetImplementedError, PIPELINE_COLLECTION_BASE_FILENAME, PROMPTBOOK_VERSION, ParsingError, PipelineExecutionError, PipelineLogicError, PrefixStorage, RESERVED_PARAMETER_NAMES, ReferenceError$1 as ReferenceError, UnexpectedError, VersionMismatchError, ZERO_USAGE, _AnthropicClaudeMetadataRegistration, _OpenAiMetadataRegistration, addUsage, assertsExecutionSuccessful, cacheLlmTools, collectionToJson, countTotalUsage, createCollectionFromJson, createCollectionFromPromise, createCollectionFromUrl, createLlmToolsFromConfiguration, createPipelineExecutor, createSubcollection, embeddingVectorToString, executionReportJsonToString, isPassingExpectations, isPipelinePrepared, joinLlmExecutionTools, limitTotalUsage, pipelineJsonToString, pipelineStringToJson, pipelineStringToJsonSync, prepareKnowledgeFromMarkdown, prepareKnowledgePieces, preparePersona, preparePipeline, prepareTemplates, prettifyPipelineString, stringifyPipelineJson, unpreparePipeline, usageToHuman, usageToWorktime, validatePipeline };
8678
8784
  //# sourceMappingURL=index.es.js.map