@promptbook/node 0.61.0-16 → 0.61.0-17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -4,6 +4,8 @@ import { join } from 'path';
  import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
  import { format } from 'prettier';
  import parserHtml from 'prettier/parser-html';
+ import Anthropic from '@anthropic-ai/sdk';
+ import OpenAI from 'openai';
 
  /*! *****************************************************************************
  Copyright (c) Microsoft Corporation.
@@ -393,7 +395,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-15",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-15",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-15",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-15",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-15",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should 
be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-15",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-15",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-15",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should 
be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * Prettify the html code
@@ -1002,11 +1004,11 @@ function validatePipeline(pipeline) {
  throw new PipelineLogicError(spaceTrim$1(function (block) { return "\n\n Can not resolve some parameters:\n Either you are using a parameter that is not defined, or there are some circular dependencies.\n\n Can not resolve:\n ".concat(block(unresovedTemplates
  .map(function (_a) {
  var resultingParameterName = _a.resultingParameterName, dependentParameterNames = _a.dependentParameterNames;
- return "- {".concat(resultingParameterName, "} depends on ").concat(dependentParameterNames
+ return "- Parameter {".concat(resultingParameterName, "} which depends on ").concat(dependentParameterNames
  .map(function (dependentParameterName) { return "{".concat(dependentParameterName, "}"); })
- .join(', '));
+ .join(' and '));
  })
- .join('\n')), "\n\n Resolved:\n ").concat(block(resovedParameters.map(function (name) { return "- {".concat(name, "}"); }).join('\n')), "\n "); }));
+ .join('\n')), "\n\n Resolved:\n ").concat(block(resovedParameters.map(function (name) { return "- Parameter {".concat(name, "}"); }).join('\n')), "\n "); }));
  }
  resovedParameters = __spreadArray(__spreadArray([], __read(resovedParameters), false), __read(currentlyResovedTemplates.map(function (_a) {
  var resultingParameterName = _a.resultingParameterName;
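
For illustration, an editor's sketch (not from the package) of the message shape this hunk switches to; the parameter names are made up:

    // Hypothetical unresolved template: {summary} depends on {text} and {language}
    const dependentParameterNames = ['text', 'language'];
    const line = '- Parameter {summary} which depends on ' +
        dependentParameterNames.map(function (name) { return '{' + name + '}'; }).join(' and ');
    console.log(line);
    // -> - Parameter {summary} which depends on {text} and {language}
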
@@ -2029,9 +2031,11 @@ function isPipelinePrepared(pipeline) {
  // Note: Ignoring `pipeline.preparations` @@@
  // Note: Ignoring `pipeline.knowledgePieces` @@@
  if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
+ console.log('!!!!', 'Not all personas have modelRequirements');
  return false;
  }
  if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
+ console.log('!!!!', 'Not all knowledgeSources have preparationIds');
  return false;
  }
  // TODO: !!!!! Is context in each template
@@ -2040,6 +2044,7 @@ function isPipelinePrepared(pipeline) {
  return true;
  }
  /**
+ * TODO: [🏠] Maybe base this on `makeValidator`
  * TODO: [🔼] Export via core or utils
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
  */
@@ -2155,7 +2160,7 @@ function replaceParameters(template, parameters) {
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0-15';
+ var PROMPTBOOK_VERSION = '0.61.0-16';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /**
@@ -5604,5 +5609,1086 @@ function listAllFiles(path, isRecursive) {
  * Note: [🟢] This code should never be published outside of `@promptbook/node`
  */
 
- export { PROMPTBOOK_VERSION, createCollectionFromDirectory };
+ /**
+ * This error type indicates that you are trying to use a feature that is not available in the current environment
+ */
+ var EnvironmentMismatchError = /** @class */ (function (_super) {
+ __extends(EnvironmentMismatchError, _super);
+ function EnvironmentMismatchError(message) {
+ var _this = _super.call(this, message) || this;
+ _this.name = 'EnvironmentMismatchError';
+ Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
+ return _this;
+ }
+ return EnvironmentMismatchError;
+ }(Error));
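
An editor's sketch of why the `Object.setPrototypeOf` call matters in this ES5-transpiled subclass: it keeps `instanceof` checks on the custom error working:

    try {
        throw new EnvironmentMismatchError('This feature is only available in Node.js');
    } catch (error) {
        console.log(error instanceof EnvironmentMismatchError); // -> true, thanks to setPrototypeOf
        console.log(error.name); // -> 'EnvironmentMismatchError'
    }
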
+
+ /**
+ * Helper for computing usage
+ *
+ * @param content the content of prompt or response
+ * @returns part of PromptResultUsageCounts
+ *
+ * @private internal util of LlmExecutionTools
+ */
+ function computeUsageCounts(content) {
+ return {
+ charactersCount: { value: countCharacters(content) },
+ wordsCount: { value: countWords(content) },
+ sentencesCount: { value: countSentences(content) },
+ linesCount: { value: countLines(content) },
+ paragraphsCount: { value: countParagraphs(content) },
+ pagesCount: { value: countPages(content) },
+ };
+ }
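
A sketch of the shape `computeUsageCounts` returns; the exact numbers depend on the `countXxx` helpers defined elsewhere in this bundle, so the values below are illustrative:

    const counts = computeUsageCounts('Hello world. How are you?');
    // -> { charactersCount: { value: 25 }, wordsCount: { value: 5 },
    //      sentencesCount: { value: 2 }, linesCount: { value: 1 },
    //      paragraphsCount: { value: 1 }, pagesCount: { value: 1 } }
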
+
+ /**
+ * Make UncertainNumber
+ *
+ * @param value
+ *
+ * @private utility for initializing UncertainNumber
+ */
+ function uncertainNumber(value) {
+ if (value === null || value === undefined || Number.isNaN(value)) {
+ return { value: 0, isUncertain: true };
+ }
+ return { value: value };
+ }
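
A quick sketch of the guard's behavior:

    console.log(uncertainNumber(1234)); // -> { value: 1234 }
    console.log(uncertainNumber(undefined)); // -> { value: 0, isUncertain: true }
    console.log(uncertainNumber(NaN)); // -> { value: 0, isUncertain: true }
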
+
+ /**
+ * Get current date in ISO 8601 format
+ *
+ * @private This is an internal util of the promptbook
+ */
+ function getCurrentIsoDate() {
+ return new Date().toISOString();
+ }
+
+ /**
+ * Function `computeUsage` creates the price per one token from the price string found on the OpenAI pricing page
+ *
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
+ */
+ function computeUsage(value) {
+ var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
+ return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
+ }
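
A worked example of the parsing, using the Claude 3 Opus prompt rate from the list below:

    // '$15.00 / 1M tokens' splits into '$15.00' and '1M tokens'
    // -> 15.00 / 1 / 1000000 = 0.000015 USD per token
    console.log(computeUsage('$15.00 / 1M tokens')); // -> 0.000015
    // 1000 prompt tokens therefore cost 1000 * 0.000015 = 0.015 USD
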
+
+ /**
+ * List of available Anthropic Claude models with pricing
+ *
+ * Note: Done at 2024-05-25
+ *
+ * @see https://docs.anthropic.com/en/docs/models-overview
+ */
+ var ANTHROPIC_CLAUDE_MODELS = [
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude 3 Opus',
+ modelName: 'claude-3-opus-20240229',
+ pricing: {
+ prompt: computeUsage("$15.00 / 1M tokens"),
+ output: computeUsage("$75.00 / 1M tokens"),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude 3 Sonnet',
+ modelName: 'claude-3-sonnet-20240229',
+ pricing: {
+ prompt: computeUsage("$3.00 / 1M tokens"),
+ output: computeUsage("$15.00 / 1M tokens"),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude 3 Haiku',
+ modelName: 'claude-3-haiku-20240307',
+ pricing: {
+ prompt: computeUsage("$0.25 / 1M tokens"),
+ output: computeUsage("$1.25 / 1M tokens"),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude 2.1',
+ modelName: 'claude-2.1',
+ pricing: {
+ prompt: computeUsage("$8.00 / 1M tokens"),
+ output: computeUsage("$24.00 / 1M tokens"),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude 2',
+ modelName: 'claude-2.0',
+ pricing: {
+ prompt: computeUsage("$8.00 / 1M tokens"),
+ output: computeUsage("$24.00 / 1M tokens"),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude Instant 1.2',
+ modelName: 'claude-instant-1.2',
+ pricing: {
+ prompt: computeUsage("$0.80 / 1M tokens"),
+ output: computeUsage("$2.40 / 1M tokens"),
+ },
+ },
+ // TODO: !!! Claude 1 and 2 also have completion versions - ask Hoagy
+ ];
+ /**
+ * Note: [🤖] Add models of new variant
+ * TODO: [🧠] !!! Add embedding models OR does Anthropic have only chat+completion models?
+ * TODO: [🧠] Some mechanism to propagate unsureness
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency, and understanding), contextWindow,...
+ * TODO: [🕚] Make this list dynamic - modelNames can be listed dynamically, but not modelVariant, legacy status, context length and pricing
+ */
+
+ /**
+ * Execution Tools for calling Anthropic Claude API.
+ */
+ var AnthropicClaudeExecutionTools = /** @class */ (function () {
+ /**
+ * Creates Anthropic Claude Execution Tools.
+ *
+ * @param options which are relevant are directly passed to the Anthropic Claude client
+ */
+ function AnthropicClaudeExecutionTools(options) {
+ if (options === void 0) { options = {}; }
+ this.options = options;
+ // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
+ var anthropicOptions = __assign({}, options);
+ delete anthropicOptions.isVerbose;
+ this.client = new Anthropic(anthropicOptions);
+ }
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
+ get: function () {
+ return 'Anthropic Claude';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
+ get: function () {
+ return 'Use all models provided by Anthropic Claude';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ /**
+ * Calls Anthropic Claude API to use a chat model.
+ */
+ AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var content, parameters, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ if (this.options.isVerbose) {
+ console.info('💬 Anthropic Claude callChatModel call');
+ }
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ // TODO: [☂] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'CHAT') {
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
+ }
+ rawRequest = {
+ model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
+ max_tokens: modelRequirements.maxTokens || 4096,
+ // <- TODO: [🌾] Make some global max cap for maxTokens
+ temperature: modelRequirements.temperature,
+ system: modelRequirements.systemMessage,
+ // <- TODO: [🎈] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
+ // <- Note: [🧆]
+ messages: [
+ {
+ role: 'user',
+ content: replaceParameters(content, parameters),
+ },
+ ],
+ // TODO: Is there some equivalent of user identification? -> user: this.options.user,
+ };
+ start = getCurrentIsoDate();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ return [4 /*yield*/, this.client.messages.create(rawRequest)];
+ case 1:
+ rawResponse = _a.sent();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ if (!rawResponse.content[0]) {
+ throw new PipelineExecutionError('No content from Anthropic Claude');
+ }
+ if (rawResponse.content.length > 1) {
+ throw new PipelineExecutionError('More than one content block from Anthropic Claude');
+ }
+ resultContent = rawResponse.content[0].text;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ usage = {
+ price: { value: 0, isUncertain: true } /* <- TODO: [🐞] Compute usage */,
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
+ output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(resultContent)),
+ };
+ return [2 /*return*/, {
+ content: resultContent,
+ modelName: rawResponse.model,
+ timing: {
+ start: start,
+ complete: complete,
+ },
+ usage: usage,
+ rawResponse: rawResponse,
+ // <- [🤹‍♂️]
+ }];
+ }
+ });
+ });
+ };
+ /*
+ TODO: [👍]
+ public async callCompletionModel(
+ prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
+ ): Promise<PromptCompletionResult> {
+
+ if (this.options.isVerbose) {
+ console.info('🖋 Anthropic Claude callCompletionModel call');
+ }
+
+ const { content, parameters, modelRequirements } = prompt;
+
+ // TODO: [☂] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
+ }
+
+ const model = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ const modelSettings = {
+ model: rawResponse.model || model,
+ max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for legacy reasons
+ // <- TODO: [🌾] Make some global max cap for maxTokens
+ // <- TODO: Use here `systemMessage`, `temperature` and `seed`
+ };
+
+ const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
+ ...modelSettings,
+ prompt: replaceParameters(content, parameters),
+ user: this.options.user,
+ };
+ const start: string_date_iso8601 = getCurrentIsoDate();
+ let complete: string_date_iso8601;
+
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ const rawResponse = await this.client.completions.create(rawRequest);
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError('No choices from Anthropic Claude');
+ }
+
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should maybe be only a warning
+ throw new PipelineExecutionError('More than one choice from Anthropic Claude');
+ }
+
+ const resultContent = rawResponse.choices[0].text;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [🐞] Compute usage * / } satisfies PromptResultUsage;
+
+
+
+ return {
+ content: resultContent,
+ modelName: rawResponse.model || model,
+ timing: {
+ start,
+ complete,
+ },
+ usage,
+ rawResponse,
+ // <- [🤹‍♂️]
+ };
+ }
+ */
+ // <- Note: [🤖] callXxxModel
+ /**
+ * Get the model that should be used as default
+ */
+ AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
+ var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
+ var modelName = _a.modelName;
+ return modelName.startsWith(defaultModelName);
+ });
+ if (model === undefined) {
+ throw new UnexpectedError(spaceTrim(function (block) {
+ return "\n Cannot find model in Anthropic Claude models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
+ var modelName = _a.modelName;
+ return "- \"".concat(modelName, "\"");
+ }).join('\n')), "\n\n ");
+ }));
+ }
+ return model;
+ };
+ /**
+ * Default model for chat variant.
+ */
+ AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
+ return this.getDefaultModel('claude-3-opus');
+ };
+ // <- Note: [🤖] getDefaultXxxModel
+ /**
+ * List all available Anthropic Claude models that can be used
+ */
+ AnthropicClaudeExecutionTools.prototype.listModels = function () {
+ return ANTHROPIC_CLAUDE_MODELS;
+ };
+ return AnthropicClaudeExecutionTools;
+ }());
+ /**
+ * TODO: !!!! [🏆] JSON mode
+ * TODO: [🧠] Maybe handle errors via transformAnthropicError (like transformAzureError)
+ * TODO: Maybe create some common util for callChatModel and callCompletionModel
+ * TODO: Maybe make custom AnthropicError
+ * TODO: [🧠][🎈] Maybe use `isDeterministic` from options
+ */
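
A minimal usage sketch of the class above; the `apiKey` value is a hypothetical placeholder and the prompt fields follow the `callChatModel` signature as read from this diff:

    const tools = new AnthropicClaudeExecutionTools({
        isVerbose: true,
        apiKey: 'sk-ant-...', // <- hypothetical placeholder key, passed through to the Anthropic client
    });
    const result = await tools.callChatModel({
        content: 'Summarize {topic} in one sentence.',
        parameters: { topic: 'prompt engineering' },
        modelRequirements: { modelVariant: 'CHAT', modelName: 'claude-3-opus-20240229' },
    });
    console.info(result.content, result.usage);
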
+
+ /**
+ * List of available OpenAI models with pricing
+ *
+ * Note: Done at 2024-05-20
+ *
+ * @see https://platform.openai.com/docs/models/
+ * @see https://openai.com/api/pricing/
+ */
+ var OPENAI_MODELS = [
+ /*/
+ {
+ modelTitle: 'dall-e-3',
+ modelName: 'dall-e-3',
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'whisper-1',
+ modelName: 'whisper-1',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'davinci-002',
+ modelName: 'davinci-002',
+ pricing: {
+ prompt: computeUsage("$2.00 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"), // <- not sure
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'dall-e-2',
+ modelName: 'dall-e-2',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-16k',
+ modelName: 'gpt-3.5-turbo-16k',
+ pricing: {
+ prompt: computeUsage("$3.00 / 1M tokens"),
+ output: computeUsage("$4.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1-hd-1106',
+ modelName: 'tts-1-hd-1106',
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1-hd',
+ modelName: 'tts-1-hd',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4',
+ modelName: 'gpt-4',
+ pricing: {
+ prompt: computeUsage("$30.00 / 1M tokens"),
+ output: computeUsage("$60.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-32k',
+ modelName: 'gpt-4-32k',
+ pricing: {
+ prompt: computeUsage("$60.00 / 1M tokens"),
+ output: computeUsage("$120.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-0613',
+ modelName: 'gpt-4-0613',
+ pricing: {
+ prompt: computeUsage(` / 1M tokens`),
+ output: computeUsage(` / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-turbo-2024-04-09',
+ modelName: 'gpt-4-turbo-2024-04-09',
+ pricing: {
+ prompt: computeUsage("$10.00 / 1M tokens"),
+ output: computeUsage("$30.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-1106',
+ modelName: 'gpt-3.5-turbo-1106',
+ pricing: {
+ prompt: computeUsage("$1.00 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-turbo',
+ modelName: 'gpt-4-turbo',
+ pricing: {
+ prompt: computeUsage("$10.00 / 1M tokens"),
+ output: computeUsage("$30.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
+ modelName: 'gpt-3.5-turbo-instruct-0914',
+ pricing: {
+ prompt: computeUsage("$1.50 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'gpt-3.5-turbo-instruct',
+ modelName: 'gpt-3.5-turbo-instruct',
+ pricing: {
+ prompt: computeUsage("$1.50 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1',
+ modelName: 'tts-1',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo',
+ modelName: 'gpt-3.5-turbo',
+ pricing: {
+ prompt: computeUsage("$3.00 / 1M tokens"),
+ output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-0301',
+ modelName: 'gpt-3.5-turbo-0301',
+ pricing: {
+ prompt: computeUsage("$1.50 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'babbage-002',
+ modelName: 'babbage-002',
+ pricing: {
+ prompt: computeUsage("$0.40 / 1M tokens"),
+ output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-1106-preview',
+ modelName: 'gpt-4-1106-preview',
+ pricing: {
+ prompt: computeUsage("$10.00 / 1M tokens"),
+ output: computeUsage("$30.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-0125-preview',
+ modelName: 'gpt-4-0125-preview',
+ pricing: {
+ prompt: computeUsage("$10.00 / 1M tokens"),
+ output: computeUsage("$30.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1-1106',
+ modelName: 'tts-1-1106',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-0125',
+ modelName: 'gpt-3.5-turbo-0125',
+ pricing: {
+ prompt: computeUsage("$0.50 / 1M tokens"),
+ output: computeUsage("$1.50 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-turbo-preview',
+ modelName: 'gpt-4-turbo-preview',
+ pricing: {
+ prompt: computeUsage("$10.00 / 1M tokens"),
+ output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'EMBEDDING',
+ modelTitle: 'text-embedding-3-large',
+ modelName: 'text-embedding-3-large',
+ pricing: {
+ prompt: computeUsage("$0.13 / 1M tokens"),
+ // TODO: [🐝] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'EMBEDDING',
+ modelTitle: 'text-embedding-3-small',
+ modelName: 'text-embedding-3-small',
+ pricing: {
+ prompt: computeUsage("$0.02 / 1M tokens"),
+ // TODO: [🐝] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-0613',
+ modelName: 'gpt-3.5-turbo-0613',
+ pricing: {
+ prompt: computeUsage("$1.50 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'EMBEDDING',
+ modelTitle: 'text-embedding-ada-002',
+ modelName: 'text-embedding-ada-002',
+ pricing: {
+ prompt: computeUsage("$0.1 / 1M tokens"),
+ // TODO: [🐝] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+ output: 0, // <- Note: [🆖] In Embedding models you don't pay for output
+ },
+ },
+ /**/
+ /*/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-1106-vision-preview',
+ modelName: 'gpt-4-1106-vision-preview',
+ },
+ /**/
+ /*/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-vision-preview',
+ modelName: 'gpt-4-vision-preview',
+ pricing: {
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4o-2024-05-13',
+ modelName: 'gpt-4o-2024-05-13',
+ pricing: {
+ prompt: computeUsage("$5.00 / 1M tokens"),
+ output: computeUsage("$15.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4o',
+ modelName: 'gpt-4o',
+ pricing: {
+ prompt: computeUsage("$5.00 / 1M tokens"),
+ output: computeUsage("$15.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
+ modelName: 'gpt-3.5-turbo-16k-0613',
+ pricing: {
+ prompt: computeUsage("$3.00 / 1M tokens"),
+ output: computeUsage("$4.00 / 1M tokens"),
+ },
+ },
+ /**/
+ ];
+ /**
+ * Note: [🤖] Add models of new variant
+ * TODO: [🧠] Some mechanism to propagate unsureness
+ * TODO: [🕚][👮‍♀️] Make this list dynamic - modelNames can be listed dynamically, but not modelVariant, legacy status, context length and pricing
+ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency, and understanding), contextWindow,...
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
+ * @see https://openai.com/api/pricing/
+ * @see /other/playground/playground.ts
+ * TODO: [📝] Make better
+ * TODO: Change model titles to human-readable ones, e.g. "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
+ * TODO: [🚸] Not all models are compatible with JSON mode, add this information here and use it
+ */
6314
+
6315
+ /**
6316
+ * Computes the usage of the OpenAI API based on the response from OpenAI
6317
+ *
6318
+ * @param promptContent The content of the prompt
6319
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
6320
+ * @param rawResponse The raw response from OpenAI API
6321
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
6322
+ * @private internal util of `OpenAiExecutionTools`
6323
+ */
6324
+ function computeOpenaiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
6325
+ resultContent, rawResponse) {
6326
+ var _a, _b;
6327
+ if (rawResponse.usage === undefined) {
6328
+ throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
6329
+ }
6330
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
6331
+ throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
6332
+ }
6333
+ var inputTokens = rawResponse.usage.prompt_tokens;
6334
+ var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
6335
+ var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
6336
+ var price;
6337
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
6338
+ price = uncertainNumber();
6339
+ }
6340
+ else {
6341
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
6342
+ }
6343
+ return {
6344
+ price: price,
6345
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
6346
+ output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
6347
+ };
6348
+ }
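
A worked example of the price arithmetic, using the gpt-4o rates from the list above and a hypothetical response with 1000 prompt tokens and 500 completion tokens:

    // gpt-4o: prompt $5.00 / 1M tokens, output $15.00 / 1M tokens
    const price = 1000 * 0.000005 + 500 * 0.000015;
    console.log(price); // -> 0.0125 (USD), returned wrapped as uncertainNumber(0.0125)
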
+
+ /**
+ * Execution Tools for calling OpenAI API.
+ */
+ var OpenAiExecutionTools = /** @class */ (function () {
+ /**
+ * Creates OpenAI Execution Tools.
+ *
+ * @param options which are relevant are directly passed to the OpenAI client
+ */
+ function OpenAiExecutionTools(options) {
+ if (options === void 0) { options = {}; }
+ this.options = options;
+ // Note: Passing only OpenAI relevant options to OpenAI constructor
+ var openAiOptions = __assign({}, options);
+ delete openAiOptions.isVerbose;
+ delete openAiOptions.user;
+ this.client = new OpenAI(__assign({}, openAiOptions));
+ }
+ Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
+ get: function () {
+ return 'OpenAI';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
+ get: function () {
+ return 'Use all models provided by OpenAI';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ /**
+ * Calls OpenAI API to use a chat model.
+ */
+ OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var content, parameters, modelRequirements, expectFormat, model, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ if (this.options.isVerbose) {
+ console.info('💬 OpenAI callChatModel call', { prompt: prompt });
+ }
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
+ // TODO: [☂] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'CHAT') {
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
+ }
+ model = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ modelSettings = {
+ model: model,
+ max_tokens: modelRequirements.maxTokens,
+ // <- TODO: [🌾] Make some global max cap for maxTokens
+ temperature: modelRequirements.temperature,
+ // <- TODO: [🎈] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
+ // <- Note: [🧆]
+ };
+ if (expectFormat === 'JSON') {
+ modelSettings.response_format = {
+ type: 'json_object',
+ };
+ }
+ rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
+ ? []
+ : [
+ {
+ role: 'system',
+ content: modelRequirements.systemMessage,
+ },
+ ])), false), [
+ {
+ role: 'user',
+ content: replaceParameters(content, parameters),
+ },
+ ], false), user: this.options.user });
+ start = getCurrentIsoDate();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ return [4 /*yield*/, this.client.chat.completions.create(rawRequest)];
+ case 1:
+ rawResponse = _a.sent();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError('No choices from OpenAI');
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should maybe be only a warning
+ throw new PipelineExecutionError('More than one choice from OpenAI');
+ }
+ resultContent = rawResponse.choices[0].message.content;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
+ if (resultContent === null) {
+ throw new PipelineExecutionError('No response message from OpenAI');
+ }
+ return [2 /*return*/, {
+ content: resultContent,
+ modelName: rawResponse.model || model,
+ timing: {
+ start: start,
+ complete: complete,
+ },
+ usage: usage,
+ rawResponse: rawResponse,
+ // <- [🤹‍♂️]
+ }];
+ }
+ });
+ });
+ };
+ /**
+ * Calls OpenAI API to use a completion model.
+ */
+ OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var content, parameters, modelRequirements, model, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ if (this.options.isVerbose) {
+ console.info('🖋 OpenAI callCompletionModel call', { prompt: prompt });
+ }
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ // TODO: [☂] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
+ }
+ model = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
+ modelSettings = {
+ model: model,
+ max_tokens: modelRequirements.maxTokens || 2000,
+ // <- TODO: [🌾] Make some global max cap for maxTokens
+ temperature: modelRequirements.temperature,
+ // <- TODO: [🎈] Use `seed` here AND/OR use `isDeterministic` for entire execution tools
+ // <- Note: [🧆]
+ };
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, parameters), user: this.options.user });
+ start = getCurrentIsoDate();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ return [4 /*yield*/, this.client.completions.create(rawRequest)];
+ case 1:
+ rawResponse = _a.sent();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError('No choices from OpenAI');
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should maybe be only a warning
+ throw new PipelineExecutionError('More than one choice from OpenAI');
+ }
+ resultContent = rawResponse.choices[0].text;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
+ return [2 /*return*/, {
+ content: resultContent,
+ modelName: rawResponse.model || model,
+ timing: {
+ start: start,
+ complete: complete,
+ },
+ usage: usage,
+ rawResponse: rawResponse,
+ // <- [🤹‍♂️]
+ }];
+ }
+ });
+ });
+ };
+ /**
+ * Calls OpenAI API to use an embedding model
+ */
+ OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var content, parameters, modelRequirements, model, rawRequest, start, complete, rawResponse, resultContent, usage;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ if (this.options.isVerbose) {
+ console.info('🖋 OpenAI embedding call', { prompt: prompt });
+ }
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ // TODO: [☂] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'EMBEDDING') {
+ throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
+ }
+ model = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
+ rawRequest = {
+ input: replaceParameters(content, parameters),
+ model: model,
+ // TODO: !!!! Test model 3 and dimensions
+ };
+ start = getCurrentIsoDate();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
+ case 1:
+ rawResponse = _a.sent();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ if (rawResponse.data.length !== 1) {
+ throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
+ }
+ resultContent = rawResponse.data[0].embedding;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ usage = computeOpenaiUsage(content, '', rawResponse);
+ return [2 /*return*/, {
+ content: resultContent,
+ modelName: rawResponse.model || model,
+ timing: {
+ start: start,
+ complete: complete,
+ },
+ usage: usage,
+ rawResponse: rawResponse,
+ // <- [🤹‍♂️]
+ }];
+ }
+ });
+ });
+ };
+ // <- Note: [🤖] callXxxModel
+ /**
+ * Get the model that should be used as default
+ */
+ OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
+ var model = OPENAI_MODELS.find(function (_a) {
+ var modelName = _a.modelName;
+ return modelName === defaultModelName;
+ });
+ if (model === undefined) {
+ throw new UnexpectedError(spaceTrim(function (block) {
+ return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
+ var modelName = _a.modelName;
+ return "- \"".concat(modelName, "\"");
+ }).join('\n')), "\n\n ");
+ }));
+ }
+ return model;
+ };
+ /**
+ * Default model for chat variant.
+ */
+ OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
+ return this.getDefaultModel('gpt-4o');
+ };
+ /**
+ * Default model for completion variant.
+ */
+ OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
+ return this.getDefaultModel('gpt-3.5-turbo-instruct');
+ };
+ /**
+ * Default model for embedding variant.
+ */
+ OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
+ return this.getDefaultModel('text-embedding-3-large');
+ };
+ // <- Note: [🤖] getDefaultXxxModel
+ /**
+ * List all available OpenAI models that can be used
+ */
+ OpenAiExecutionTools.prototype.listModels = function () {
+ /*
+ Note: Dynamic listing of the models
+ const models = await this.openai.models.list({});
+
+ console.log({ models });
+ console.log(models.data);
+ */
+ return OPENAI_MODELS;
+ };
+ return OpenAiExecutionTools;
+ }());
+ /**
+ * TODO: [🧠][🧙‍♂️] Maybe there can be some wizard for those who want to use just OpenAI
+ * TODO: Maybe create some common util for callChatModel and callCompletionModel
+ * TODO: Maybe make custom OpenaiError
+ * TODO: [🧠][🎈] Maybe use `isDeterministic` from options
+ */
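
A minimal usage sketch; the `apiKey` and `user` values are hypothetical placeholders, and `expectFormat: 'JSON'` exercises the `response_format` branch above:

    const tools = new OpenAiExecutionTools({
        isVerbose: true,
        apiKey: 'sk-...', // <- hypothetical placeholder key, passed through to the OpenAI client
        user: 'user-1234',
    });
    const result = await tools.callChatModel({
        content: 'Answer the question: {question}',
        parameters: { question: 'What is 2 + 2?' },
        modelRequirements: { modelVariant: 'CHAT', modelName: 'gpt-4o', systemMessage: 'Answer as JSON.' },
        expectFormat: 'JSON',
    });
    console.info(result.content); // e.g. '{"answer": 4}'
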
6642
+
6643
+ /**
6644
+ * @@@
6645
+ *
6646
+ * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
6647
+ *
6648
+ * It looks for environment variables:
6649
+ * - `process.env.OPENAI_API_KEY`
6650
+ * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
6651
+ *
6652
+ * @returns @@@
6653
+ */
6654
+ function createLlmToolsFromEnv(options) {
6655
+ if (options === void 0) { options = {}; }
6656
+ if (!isRunningInNode()) {
6657
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
6658
+ }
6659
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
6660
+ var llmTools = [];
6661
+ if (typeof process.env.OPENAI_API_KEY === 'string') {
6662
+ llmTools.push(new OpenAiExecutionTools({
6663
+ isVerbose: isVerbose,
6664
+ apiKey: process.env.OPENAI_API_KEY,
6665
+ }));
6666
+ }
6667
+ if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
6668
+ llmTools.push(new AnthropicClaudeExecutionTools({
6669
+ isVerbose: isVerbose,
6670
+ apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
6671
+ }));
6672
+ }
6673
+ if (llmTools.length === 0) {
6674
+ throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
6675
+ }
6676
+ else if (llmTools.length === 1) {
6677
+ return llmTools[0];
6678
+ }
6679
+ else {
6680
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
6681
+ }
6682
+ }
6683
+ /**
6684
+ * TODO: [๐Ÿ”ผ] !!! Export via `@promptbook/node`
6685
+ * TODO: @@@ write discussion about this - wizzard
6686
+ * TODO: Add Azure
6687
+ * TODO: [๐Ÿง ] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
6688
+ * TODO: [๐Ÿง ] Is there some meaningfull way how to test this util
6689
+ * TODO: [๐Ÿง ] Maybe pass env as argument
6690
+ * Note: [๐ŸŸข] This code should never be published outside of `@promptbook/node`
6691
+ */
6692
+
6693
+ export { PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromEnv };
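
A minimal usage sketch of the newly exported `createLlmToolsFromEnv` (assuming at least `OPENAI_API_KEY` is set in the environment):

    // e.g. OPENAI_API_KEY=sk-... node example.js  <- hypothetical placeholder key
    import { createLlmToolsFromEnv } from '@promptbook/node';
    const llmTools = createLlmToolsFromEnv({ isVerbose: true });
    console.info(llmTools.title); // -> 'OpenAI' (or joined tools when multiple keys are set)
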
  //# sourceMappingURL=index.es.js.map