@promptbook/cli 0.89.0 → 0.92.0-4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -0
- package/esm/index.es.js +310 -64
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/_packages/deepseek.index.d.ts +2 -0
- package/esm/typings/src/_packages/google.index.d.ts +2 -0
- package/esm/typings/src/llm-providers/_common/filterModels.d.ts +15 -0
- package/esm/typings/src/llm-providers/deepseek/deepseek-models.d.ts +23 -0
- package/esm/typings/src/llm-providers/google/google-models.d.ts +23 -0
- package/esm/typings/src/personas/preparePersona.d.ts +1 -1
- package/esm/typings/src/pipeline/PipelineJson/PersonaJson.d.ts +4 -2
- package/package.json +1 -1
- package/umd/index.umd.js +310 -64
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED
|
@@ -56,7 +56,7 @@
|
|
|
56
56
|
* @generated
|
|
57
57
|
* @see https://github.com/webgptorg/promptbook
|
|
58
58
|
*/
|
|
59
|
-
const PROMPTBOOK_ENGINE_VERSION = '0.
|
|
59
|
+
const PROMPTBOOK_ENGINE_VERSION = '0.92.0-4';
|
|
60
60
|
/**
|
|
61
61
|
* TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
|
|
62
62
|
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
@@ -4471,7 +4471,7 @@
|
|
|
4471
4471
|
* Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
|
|
4472
4472
|
*/
|
|
4473
4473
|
|
|
4474
|
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelRequirements",format:"JSON",dependentParameterNames:["availableModelNames","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModelNames}` List of available model names separated by comma (,)\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Example\n\n\\`\\`\\`json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
|
|
4474
|
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book",formfactorName:"GENERIC",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",resultingParameterName:"knowledgePieces",dependentParameterNames:["knowledgeContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge from Markdown\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.book`\n- INPUT PARAMETER `{knowledgeContent}` Markdown document content\n- OUTPUT PARAMETER `{knowledgePieces}` The knowledge JSON object\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}\n```\n\n`-> {knowledgePieces}`\n"}],sourceFile:"./books/prepare-knowledge-from-markdown.book"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",resultingParameterName:"keywords",dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Keywords\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-keywords.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{keywords}` Keywords separated by comma\n\n## Knowledge\n\n<!-- TODO: [🍆] -FORMAT JSON -->\n\n```markdown\nYou are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}\n```\n\n`-> {keywords}`\n"}],sourceFile:"./books/prepare-knowledge-keywords.book"},{title:"Prepare Knowledge-piece 
Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.book",formfactorName:"GENERIC",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"knowledge",title:"Knowledge",content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",resultingParameterName:"title",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Knowledge-piece Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-knowledge-title.book`\n- INPUT PARAMETER `{knowledgePieceContent}` The content\n- OUTPUT PARAMETER `{title}` The title of the document\n\n## Knowledge\n\n- EXPECT MIN 1 WORD\n- EXPECT MAX 8 WORDS\n\n```markdown\nYou are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-knowledge-title.book"},{title:"Prepare Persona",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.book",formfactorName:"GENERIC",parameters:[{name:"availableModels",description:"List of available model names together with their descriptions as JSON",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelsRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-model-requirements",title:"Make modelRequirements",content:"You are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n```json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n```\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n```json\n{availableModels}\n```\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. 
By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",resultingParameterName:"modelsRequirements",format:"JSON",dependentParameterNames:["availableModels","personaDescription"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Persona\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-persona.book`\n- INPUT PARAMETER `{availableModels}` List of available model names together with their descriptions as JSON\n- INPUT PARAMETER `{personaDescription}` Description of the persona\n- OUTPUT PARAMETER `{modelsRequirements}` Specific requirements for the model\n\n## Make modelRequirements\n\n- FORMAT JSON\n\n```markdown\nYou are an experienced AI engineer, you need to find the best models for virtual assistants:\n\n## Example\n\n\\`\\`\\`json\n[\n {\n \"modelName\": \"gpt-4o\",\n \"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n \"temperature\": 0.7\n },\n {\n \"modelName\": \"claude-3-5-sonnet\",\n \"systemMessage\": \"You are a friendly and knowledgeable chatbot.\",\n \"temperature\": 0.5\n }\n]\n\\`\\`\\`\n\n## Instructions\n\n- Your output format is JSON array\n- Sort best-fitting models first\n- Omit any models that are not suitable\n- Write just the JSON, no other text should be present\n- Array contain items with following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nHere are the available models:\n\n\\`\\`\\`json\n{availableModels}\n\\`\\`\\`\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. 
If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}\n```\n\n`-> {modelsRequirements}`\n"}],sourceFile:"./books/prepare-persona.book"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-title.book",formfactorName:"GENERIC",parameters:[{name:"book",description:"The book to prepare the title for",isInput:true,isOutput:false},{name:"title",description:"Best title for the book",isInput:false,isOutput:true}],tasks:[{taskType:"PROMPT_TASK",name:"make-title",title:"Make title",content:"Make best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}",resultingParameterName:"title",expectations:{words:{min:1,max:8},lines:{min:1,max:1}},dependentParameterNames:["book"]}],personas:[],preparations:[],knowledgeSources:[],knowledgePieces:[],sources:[{type:"BOOK",path:null,content:"# Prepare Title\n\n- PIPELINE URL `https://promptbook.studio/promptbook/prepare-title.book`\n- INPUT PARAMETER `{book}` The book to prepare the title for\n- OUTPUT PARAMETER `{title}` Best title for the book\n\n## Make title\n\n- EXPECT MIN 1 Word\n- EXPECT MAX 8 Words\n- EXPECT EXACTLY 1 Line\n\n```markdown\nMake best title for given text which describes the workflow:\n\n## Rules\n\n- Write just title, nothing else\n- Title should be concise and clear - Write maximum ideally 2 words, maximum 5 words\n- Title starts with emoticon\n- Title should not mention the input and output of the workflow but the main purpose of the workflow\n _For example, not \"✍ Convert Knowledge-piece to title\" but \"✍ Title\"_\n\n## The workflow\n\n> {book}\n```\n\n`-> {title}`\n"}],sourceFile:"./books/prepare-title.book"}];
|
|
4475
4475
|
|
|
4476
4476
|
/**
|
|
4477
4477
|
* Function isValidJsonString will tell you if the string is valid JSON or not
|
|
@@ -4732,7 +4732,7 @@
|
|
|
4732
4732
|
*/
|
|
4733
4733
|
function unpreparePipeline(pipeline) {
|
|
4734
4734
|
let { personas, knowledgeSources, tasks } = pipeline;
|
|
4735
|
-
personas = personas.map((persona) => ({ ...persona,
|
|
4735
|
+
personas = personas.map((persona) => ({ ...persona, modelsRequirements: undefined, preparationIds: undefined }));
|
|
4736
4736
|
knowledgeSources = knowledgeSources.map((knowledgeSource) => ({ ...knowledgeSource, preparationIds: undefined }));
|
|
4737
4737
|
tasks = tasks.map((task) => {
|
|
4738
4738
|
let { dependentParameterNames } = task;
|
|
@@ -4890,7 +4890,7 @@
|
|
|
4890
4890
|
if (pipeline.title === undefined || pipeline.title === '' || pipeline.title === DEFAULT_BOOK_TITLE) {
|
|
4891
4891
|
return false;
|
|
4892
4892
|
}
|
|
4893
|
-
if (!pipeline.personas.every((persona) => persona.
|
|
4893
|
+
if (!pipeline.personas.every((persona) => persona.modelsRequirements !== undefined)) {
|
|
4894
4894
|
return false;
|
|
4895
4895
|
}
|
|
4896
4896
|
if (!pipeline.knowledgeSources.every((knowledgeSource) => knowledgeSource.preparationIds !== undefined)) {
|
|
@@ -7050,27 +7050,40 @@
|
|
|
7050
7050
|
pipeline: await collection.getPipelineByUrl('https://promptbook.studio/promptbook/prepare-persona.book'),
|
|
7051
7051
|
tools,
|
|
7052
7052
|
});
|
|
7053
|
-
// TODO: [🚐] Make arrayable LLMs -> single LLM DRY
|
|
7054
7053
|
const _llms = arrayableToArray(tools.llm);
|
|
7055
7054
|
const llmTools = _llms.length === 1 ? _llms[0] : joinLlmExecutionTools(..._llms);
|
|
7056
|
-
const availableModels = await llmTools.listModels()
|
|
7057
|
-
const availableModelNames = availableModels
|
|
7055
|
+
const availableModels = (await llmTools.listModels())
|
|
7058
7056
|
.filter(({ modelVariant }) => modelVariant === 'CHAT')
|
|
7059
|
-
.map(({ modelName }) =>
|
|
7060
|
-
|
|
7061
|
-
|
|
7057
|
+
.map(({ modelName, modelDescription }) => ({
|
|
7058
|
+
modelName,
|
|
7059
|
+
modelDescription,
|
|
7060
|
+
// <- Note: `modelTitle` and `modelVariant` is not relevant for this task
|
|
7061
|
+
}));
|
|
7062
|
+
const result = await preparePersonaExecutor({
|
|
7063
|
+
availableModels /* <- Note: Passing as JSON */,
|
|
7064
|
+
personaDescription,
|
|
7065
|
+
}).asPromise();
|
|
7062
7066
|
const { outputParameters } = result;
|
|
7063
|
-
const {
|
|
7064
|
-
const
|
|
7067
|
+
const { modelsRequirements: modelsRequirementsJson } = outputParameters;
|
|
7068
|
+
const modelsRequirementsUnchecked = JSON.parse(modelsRequirementsJson);
|
|
7065
7069
|
if (isVerbose) {
|
|
7066
|
-
console.info(`PERSONA ${personaDescription}`,
|
|
7070
|
+
console.info(`PERSONA ${personaDescription}`, modelsRequirementsUnchecked);
|
|
7067
7071
|
}
|
|
7068
|
-
|
|
7069
|
-
|
|
7072
|
+
if (!Array.isArray(modelsRequirementsUnchecked)) {
|
|
7073
|
+
throw new UnexpectedError(spaceTrim__default["default"]((block) => `
|
|
7074
|
+
Invalid \`modelsRequirements\`:
|
|
7075
|
+
|
|
7076
|
+
\`\`\`json
|
|
7077
|
+
${block(JSON.stringify(modelsRequirementsUnchecked, null, 4))}
|
|
7078
|
+
\`\`\`
|
|
7079
|
+
`));
|
|
7080
|
+
}
|
|
7081
|
+
const modelsRequirements = modelsRequirementsUnchecked.map((modelRequirements) => ({
|
|
7070
7082
|
modelVariant: 'CHAT',
|
|
7071
|
-
|
|
7072
|
-
|
|
7073
|
-
|
|
7083
|
+
...modelRequirements,
|
|
7084
|
+
}));
|
|
7085
|
+
return {
|
|
7086
|
+
modelsRequirements,
|
|
7074
7087
|
};
|
|
7075
7088
|
}
|
|
7076
7089
|
/**
|
|
@@ -7497,14 +7510,14 @@
|
|
|
7497
7510
|
// TODO: [🖌][🧠] Implement some `mapAsync` function
|
|
7498
7511
|
const preparedPersonas = new Array(personas.length);
|
|
7499
7512
|
await forEachAsync(personas, { maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, async (persona, index) => {
|
|
7500
|
-
const
|
|
7513
|
+
const { modelsRequirements } = await preparePersona(persona.description, { ...tools, llm: llmToolsWithUsage }, {
|
|
7501
7514
|
rootDirname,
|
|
7502
7515
|
maxParallelCount /* <- TODO: [🪂] */,
|
|
7503
7516
|
isVerbose,
|
|
7504
7517
|
});
|
|
7505
7518
|
const preparedPersona = {
|
|
7506
7519
|
...persona,
|
|
7507
|
-
|
|
7520
|
+
modelsRequirements,
|
|
7508
7521
|
preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id],
|
|
7509
7522
|
// <- TODO: [🍙] Make some standard order of json properties
|
|
7510
7523
|
};
|
|
@@ -14177,6 +14190,7 @@
|
|
|
14177
14190
|
modelVariant: 'CHAT',
|
|
14178
14191
|
modelTitle: 'Claude 3.5 Sonnet',
|
|
14179
14192
|
modelName: 'claude-3-5-sonnet-20240620',
|
|
14193
|
+
modelDescription: 'Latest Claude model with great reasoning, coding, and language understanding capabilities. 200K context window. Optimized balance of intelligence and speed.',
|
|
14180
14194
|
pricing: {
|
|
14181
14195
|
prompt: computeUsage(`$3.00 / 1M tokens`),
|
|
14182
14196
|
output: computeUsage(`$15.00 / 1M tokens`),
|
|
@@ -14186,6 +14200,7 @@
|
|
|
14186
14200
|
modelVariant: 'CHAT',
|
|
14187
14201
|
modelTitle: 'Claude 3 Opus',
|
|
14188
14202
|
modelName: 'claude-3-opus-20240229',
|
|
14203
|
+
modelDescription: 'Most capable Claude model excelling at complex reasoning, coding, and detailed instruction following. 200K context window. Best for sophisticated tasks requiring nuanced understanding.',
|
|
14189
14204
|
pricing: {
|
|
14190
14205
|
prompt: computeUsage(`$15.00 / 1M tokens`),
|
|
14191
14206
|
output: computeUsage(`$75.00 / 1M tokens`),
|
|
@@ -14195,6 +14210,7 @@
|
|
|
14195
14210
|
modelVariant: 'CHAT',
|
|
14196
14211
|
modelTitle: 'Claude 3 Sonnet',
|
|
14197
14212
|
modelName: 'claude-3-sonnet-20240229',
|
|
14213
|
+
modelDescription: 'Strong general-purpose model with excellent performance across reasoning, conversation, and coding tasks. 200K context window. Good balance of intelligence and cost-efficiency.',
|
|
14198
14214
|
pricing: {
|
|
14199
14215
|
prompt: computeUsage(`$3.00 / 1M tokens`),
|
|
14200
14216
|
output: computeUsage(`$15.00 / 1M tokens`),
|
|
@@ -14204,6 +14220,7 @@
|
|
|
14204
14220
|
modelVariant: 'CHAT',
|
|
14205
14221
|
modelTitle: 'Claude 3 Haiku',
|
|
14206
14222
|
modelName: ' claude-3-haiku-20240307',
|
|
14223
|
+
modelDescription: 'Fastest and most compact Claude model optimized for responsiveness in interactive applications. 200K context window. Excellent for quick responses and lightweight applications.',
|
|
14207
14224
|
pricing: {
|
|
14208
14225
|
prompt: computeUsage(`$0.25 / 1M tokens`),
|
|
14209
14226
|
output: computeUsage(`$1.25 / 1M tokens`),
|
|
@@ -14213,6 +14230,7 @@
|
|
|
14213
14230
|
modelVariant: 'CHAT',
|
|
14214
14231
|
modelTitle: 'Claude 2.1',
|
|
14215
14232
|
modelName: 'claude-2.1',
|
|
14233
|
+
modelDescription: 'Improved version of Claude 2 with better performance across reasoning and truthfulness. 100K context window. Legacy model with strong reliability.',
|
|
14216
14234
|
pricing: {
|
|
14217
14235
|
prompt: computeUsage(`$8.00 / 1M tokens`),
|
|
14218
14236
|
output: computeUsage(`$24.00 / 1M tokens`),
|
|
@@ -14222,6 +14240,7 @@
|
|
|
14222
14240
|
modelVariant: 'CHAT',
|
|
14223
14241
|
modelTitle: 'Claude 2',
|
|
14224
14242
|
modelName: 'claude-2.0',
|
|
14243
|
+
modelDescription: 'Legacy model with strong general reasoning and language capabilities. 100K context window. Superseded by newer Claude 3 models.',
|
|
14225
14244
|
pricing: {
|
|
14226
14245
|
prompt: computeUsage(`$8.00 / 1M tokens`),
|
|
14227
14246
|
output: computeUsage(`$24.00 / 1M tokens`),
|
|
@@ -14229,8 +14248,9 @@
|
|
|
14229
14248
|
},
|
|
14230
14249
|
{
|
|
14231
14250
|
modelVariant: 'CHAT',
|
|
14232
|
-
modelTitle: '
|
|
14251
|
+
modelTitle: 'Claude Instant 1.2',
|
|
14233
14252
|
modelName: 'claude-instant-1.2',
|
|
14253
|
+
modelDescription: 'Older, faster Claude model optimized for high throughput applications. Lower cost but less capable than newer models. 100K context window.',
|
|
14234
14254
|
pricing: {
|
|
14235
14255
|
prompt: computeUsage(`$0.80 / 1M tokens`),
|
|
14236
14256
|
output: computeUsage(`$2.40 / 1M tokens`),
|
|
@@ -14240,6 +14260,7 @@
|
|
|
14240
14260
|
modelVariant: 'CHAT',
|
|
14241
14261
|
modelTitle: 'Claude 3.7 Sonnet',
|
|
14242
14262
|
modelName: 'claude-3-7-sonnet-20250219',
|
|
14263
|
+
modelDescription: 'Latest generation Claude model with advanced reasoning and language understanding. Enhanced capabilities over 3.5 with improved domain knowledge. 200K context window.',
|
|
14243
14264
|
pricing: {
|
|
14244
14265
|
prompt: computeUsage(`$3.00 / 1M tokens`),
|
|
14245
14266
|
output: computeUsage(`$15.00 / 1M tokens`),
|
|
@@ -14249,6 +14270,7 @@
|
|
|
14249
14270
|
modelVariant: 'CHAT',
|
|
14250
14271
|
modelTitle: 'Claude 3.5 Haiku',
|
|
14251
14272
|
modelName: 'claude-3-5-haiku-20241022',
|
|
14273
|
+
modelDescription: 'Fast and efficient Claude 3.5 variant optimized for speed and cost-effectiveness. Great for interactive applications requiring quick responses. 200K context window.',
|
|
14252
14274
|
pricing: {
|
|
14253
14275
|
prompt: computeUsage(`$0.25 / 1M tokens`),
|
|
14254
14276
|
output: computeUsage(`$1.25 / 1M tokens`),
|
|
@@ -14733,9 +14755,10 @@
|
|
|
14733
14755
|
modelVariant: 'COMPLETION',
|
|
14734
14756
|
modelTitle: 'davinci-002',
|
|
14735
14757
|
modelName: 'davinci-002',
|
|
14758
|
+
modelDescription: 'Legacy completion model with strong performance on text generation tasks. Optimized for complex instructions and longer outputs.',
|
|
14736
14759
|
pricing: {
|
|
14737
14760
|
prompt: computeUsage(`$2.00 / 1M tokens`),
|
|
14738
|
-
output: computeUsage(`$2.00 / 1M tokens`),
|
|
14761
|
+
output: computeUsage(`$2.00 / 1M tokens`),
|
|
14739
14762
|
},
|
|
14740
14763
|
},
|
|
14741
14764
|
/**/
|
|
@@ -14750,6 +14773,7 @@
|
|
|
14750
14773
|
modelVariant: 'CHAT',
|
|
14751
14774
|
modelTitle: 'gpt-3.5-turbo-16k',
|
|
14752
14775
|
modelName: 'gpt-3.5-turbo-16k',
|
|
14776
|
+
modelDescription: 'GPT-3.5 Turbo with extended 16k token context length for handling longer conversations and documents.',
|
|
14753
14777
|
pricing: {
|
|
14754
14778
|
prompt: computeUsage(`$3.00 / 1M tokens`),
|
|
14755
14779
|
output: computeUsage(`$4.00 / 1M tokens`),
|
|
@@ -14773,6 +14797,7 @@
|
|
|
14773
14797
|
modelVariant: 'CHAT',
|
|
14774
14798
|
modelTitle: 'gpt-4',
|
|
14775
14799
|
modelName: 'gpt-4',
|
|
14800
|
+
modelDescription: 'GPT-4 is a powerful language model with enhanced reasoning, instruction-following capabilities, and 8K context window. Optimized for complex tasks requiring deep understanding.',
|
|
14776
14801
|
pricing: {
|
|
14777
14802
|
prompt: computeUsage(`$30.00 / 1M tokens`),
|
|
14778
14803
|
output: computeUsage(`$60.00 / 1M tokens`),
|
|
@@ -14784,6 +14809,7 @@
|
|
|
14784
14809
|
modelVariant: 'CHAT',
|
|
14785
14810
|
modelTitle: 'gpt-4-32k',
|
|
14786
14811
|
modelName: 'gpt-4-32k',
|
|
14812
|
+
modelDescription: 'Extended context version of GPT-4 with a 32K token window for processing very long inputs and generating comprehensive responses for complex tasks.',
|
|
14787
14813
|
pricing: {
|
|
14788
14814
|
prompt: computeUsage(`$60.00 / 1M tokens`),
|
|
14789
14815
|
output: computeUsage(`$120.00 / 1M tokens`),
|
|
@@ -14806,6 +14832,7 @@
|
|
|
14806
14832
|
modelVariant: 'CHAT',
|
|
14807
14833
|
modelTitle: 'gpt-4-turbo-2024-04-09',
|
|
14808
14834
|
modelName: 'gpt-4-turbo-2024-04-09',
|
|
14835
|
+
modelDescription: 'Latest stable GPT-4 Turbo model from April 2024 with enhanced reasoning and context handling capabilities. Offers 128K context window and improved performance.',
|
|
14809
14836
|
pricing: {
|
|
14810
14837
|
prompt: computeUsage(`$10.00 / 1M tokens`),
|
|
14811
14838
|
output: computeUsage(`$30.00 / 1M tokens`),
|
|
@@ -14817,6 +14844,7 @@
|
|
|
14817
14844
|
modelVariant: 'CHAT',
|
|
14818
14845
|
modelTitle: 'gpt-3.5-turbo-1106',
|
|
14819
14846
|
modelName: 'gpt-3.5-turbo-1106',
|
|
14847
|
+
modelDescription: 'November 2023 version of GPT-3.5 Turbo with improved instruction following and a 16K token context window.',
|
|
14820
14848
|
pricing: {
|
|
14821
14849
|
prompt: computeUsage(`$1.00 / 1M tokens`),
|
|
14822
14850
|
output: computeUsage(`$2.00 / 1M tokens`),
|
|
@@ -14828,6 +14856,7 @@
|
|
|
14828
14856
|
modelVariant: 'CHAT',
|
|
14829
14857
|
modelTitle: 'gpt-4-turbo',
|
|
14830
14858
|
modelName: 'gpt-4-turbo',
|
|
14859
|
+
modelDescription: 'More capable model than GPT-4 with improved instruction following, function calling and a 128K token context window for handling very large documents.',
|
|
14831
14860
|
pricing: {
|
|
14832
14861
|
prompt: computeUsage(`$10.00 / 1M tokens`),
|
|
14833
14862
|
output: computeUsage(`$30.00 / 1M tokens`),
|
|
@@ -14839,6 +14868,7 @@
|
|
|
14839
14868
|
modelVariant: 'COMPLETION',
|
|
14840
14869
|
modelTitle: 'gpt-3.5-turbo-instruct-0914',
|
|
14841
14870
|
modelName: 'gpt-3.5-turbo-instruct-0914',
|
|
14871
|
+
modelDescription: 'September 2023 version of GPT-3.5 Turbo optimized for completion-style instruction following with a 4K context window.',
|
|
14842
14872
|
pricing: {
|
|
14843
14873
|
prompt: computeUsage(`$1.50 / 1M tokens`),
|
|
14844
14874
|
output: computeUsage(`$2.00 / 1M tokens`), // <- For gpt-3.5-turbo-instruct
|
|
@@ -14850,6 +14880,7 @@
|
|
|
14850
14880
|
modelVariant: 'COMPLETION',
|
|
14851
14881
|
modelTitle: 'gpt-3.5-turbo-instruct',
|
|
14852
14882
|
modelName: 'gpt-3.5-turbo-instruct',
|
|
14883
|
+
modelDescription: 'Optimized version of GPT-3.5 for completion-style API with good instruction following and a 4K token context window.',
|
|
14853
14884
|
pricing: {
|
|
14854
14885
|
prompt: computeUsage(`$1.50 / 1M tokens`),
|
|
14855
14886
|
output: computeUsage(`$2.00 / 1M tokens`),
|
|
@@ -14867,9 +14898,10 @@
|
|
|
14867
14898
|
modelVariant: 'CHAT',
|
|
14868
14899
|
modelTitle: 'gpt-3.5-turbo',
|
|
14869
14900
|
modelName: 'gpt-3.5-turbo',
|
|
14901
|
+
modelDescription: 'Latest version of GPT-3.5 Turbo with improved performance and instruction following capabilities. Default 4K context window with options for 16K.',
|
|
14870
14902
|
pricing: {
|
|
14871
|
-
prompt: computeUsage(`$
|
|
14872
|
-
output: computeUsage(`$
|
|
14903
|
+
prompt: computeUsage(`$0.50 / 1M tokens`),
|
|
14904
|
+
output: computeUsage(`$1.50 / 1M tokens`),
|
|
14873
14905
|
},
|
|
14874
14906
|
},
|
|
14875
14907
|
/**/
|
|
@@ -14878,6 +14910,7 @@
|
|
|
14878
14910
|
modelVariant: 'CHAT',
|
|
14879
14911
|
modelTitle: 'gpt-3.5-turbo-0301',
|
|
14880
14912
|
modelName: 'gpt-3.5-turbo-0301',
|
|
14913
|
+
modelDescription: 'March 2023 version of GPT-3.5 Turbo with a 4K token context window. Legacy model maintained for backward compatibility.',
|
|
14881
14914
|
pricing: {
|
|
14882
14915
|
prompt: computeUsage(`$1.50 / 1M tokens`),
|
|
14883
14916
|
output: computeUsage(`$2.00 / 1M tokens`),
|
|
@@ -14889,9 +14922,10 @@
|
|
|
14889
14922
|
modelVariant: 'COMPLETION',
|
|
14890
14923
|
modelTitle: 'babbage-002',
|
|
14891
14924
|
modelName: 'babbage-002',
|
|
14925
|
+
modelDescription: 'Efficient legacy completion model with a good balance of performance and speed. Suitable for straightforward text generation tasks.',
|
|
14892
14926
|
pricing: {
|
|
14893
14927
|
prompt: computeUsage(`$0.40 / 1M tokens`),
|
|
14894
|
-
output: computeUsage(`$0.40 / 1M tokens`),
|
|
14928
|
+
output: computeUsage(`$0.40 / 1M tokens`),
|
|
14895
14929
|
},
|
|
14896
14930
|
},
|
|
14897
14931
|
/**/
|
|
@@ -14900,6 +14934,7 @@
|
|
|
14900
14934
|
modelVariant: 'CHAT',
|
|
14901
14935
|
modelTitle: 'gpt-4-1106-preview',
|
|
14902
14936
|
modelName: 'gpt-4-1106-preview',
|
|
14937
|
+
modelDescription: 'November 2023 preview version of GPT-4 Turbo with improved instruction following and a 128K token context window.',
|
|
14903
14938
|
pricing: {
|
|
14904
14939
|
prompt: computeUsage(`$10.00 / 1M tokens`),
|
|
14905
14940
|
output: computeUsage(`$30.00 / 1M tokens`),
|
|
@@ -14911,6 +14946,7 @@
|
|
|
14911
14946
|
modelVariant: 'CHAT',
|
|
14912
14947
|
modelTitle: 'gpt-4-0125-preview',
|
|
14913
14948
|
modelName: 'gpt-4-0125-preview',
|
|
14949
|
+
modelDescription: 'January 2024 preview version of GPT-4 Turbo with improved reasoning capabilities and a 128K token context window.',
|
|
14914
14950
|
pricing: {
|
|
14915
14951
|
prompt: computeUsage(`$10.00 / 1M tokens`),
|
|
14916
14952
|
output: computeUsage(`$30.00 / 1M tokens`),
|
|
@@ -14928,6 +14964,7 @@
|
|
|
14928
14964
|
modelVariant: 'CHAT',
|
|
14929
14965
|
modelTitle: 'gpt-3.5-turbo-0125',
|
|
14930
14966
|
modelName: 'gpt-3.5-turbo-0125',
|
|
14967
|
+
modelDescription: 'January 2024 version of GPT-3.5 Turbo with improved reasoning capabilities and a 16K token context window.',
|
|
14931
14968
|
pricing: {
|
|
14932
14969
|
prompt: computeUsage(`$0.50 / 1M tokens`),
|
|
14933
14970
|
output: computeUsage(`$1.50 / 1M tokens`),
|
|
@@ -14939,9 +14976,10 @@
|
|
|
14939
14976
|
modelVariant: 'CHAT',
|
|
14940
14977
|
modelTitle: 'gpt-4-turbo-preview',
|
|
14941
14978
|
modelName: 'gpt-4-turbo-preview',
|
|
14979
|
+
modelDescription: 'Preview version of GPT-4 Turbo that points to the latest model version. Features improved instruction following, 128K token context window and lower latency.',
|
|
14942
14980
|
pricing: {
|
|
14943
14981
|
prompt: computeUsage(`$10.00 / 1M tokens`),
|
|
14944
|
-
output: computeUsage(`$30.00 / 1M tokens`),
|
|
14982
|
+
output: computeUsage(`$30.00 / 1M tokens`),
|
|
14945
14983
|
},
|
|
14946
14984
|
},
|
|
14947
14985
|
/**/
|
|
@@ -14950,6 +14988,7 @@
|
|
|
14950
14988
|
modelVariant: 'EMBEDDING',
|
|
14951
14989
|
modelTitle: 'text-embedding-3-large',
|
|
14952
14990
|
modelName: 'text-embedding-3-large',
|
|
14991
|
+
modelDescription: "OpenAI's most capable text embedding model designed for high-quality embeddings for complex similarity tasks and information retrieval.",
|
|
14953
14992
|
pricing: {
|
|
14954
14993
|
prompt: computeUsage(`$0.13 / 1M tokens`),
|
|
14955
14994
|
// TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
|
|
@@ -14962,6 +15001,7 @@
|
|
|
14962
15001
|
modelVariant: 'EMBEDDING',
|
|
14963
15002
|
modelTitle: 'text-embedding-3-small',
|
|
14964
15003
|
modelName: 'text-embedding-3-small',
|
|
15004
|
+
modelDescription: 'Cost-effective embedding model with good performance for simpler tasks like text similarity and retrieval. Good balance of quality and efficiency.',
|
|
14965
15005
|
pricing: {
|
|
14966
15006
|
prompt: computeUsage(`$0.02 / 1M tokens`),
|
|
14967
15007
|
// TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
|
|
@@ -14974,6 +15014,7 @@
|
|
|
14974
15014
|
modelVariant: 'CHAT',
|
|
14975
15015
|
modelTitle: 'gpt-3.5-turbo-0613',
|
|
14976
15016
|
modelName: 'gpt-3.5-turbo-0613',
|
|
15017
|
+
modelDescription: 'June 2023 version of GPT-3.5 Turbo with function calling capabilities and a 4K token context window.',
|
|
14977
15018
|
pricing: {
|
|
14978
15019
|
prompt: computeUsage(`$1.50 / 1M tokens`),
|
|
14979
15020
|
output: computeUsage(`$2.00 / 1M tokens`),
|
|
@@ -14985,6 +15026,7 @@
|
|
|
14985
15026
|
modelVariant: 'EMBEDDING',
|
|
14986
15027
|
modelTitle: 'text-embedding-ada-002',
|
|
14987
15028
|
modelName: 'text-embedding-ada-002',
|
|
15029
|
+
modelDescription: 'Legacy text embedding model suitable for text similarity and retrieval augmented generation use cases. Replaced by newer embedding-3 models.',
|
|
14988
15030
|
pricing: {
|
|
14989
15031
|
prompt: computeUsage(`$0.1 / 1M tokens`),
|
|
14990
15032
|
// TODO: [🏏] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
|
|
@@ -15015,6 +15057,7 @@
|
|
|
15015
15057
|
modelVariant: 'CHAT',
|
|
15016
15058
|
modelTitle: 'gpt-4o-2024-05-13',
|
|
15017
15059
|
modelName: 'gpt-4o-2024-05-13',
|
|
15060
|
+
modelDescription: 'May 2024 version of GPT-4o with enhanced multimodal capabilities, improved reasoning, and optimized for vision, audio and chat at lower latencies.',
|
|
15018
15061
|
pricing: {
|
|
15019
15062
|
prompt: computeUsage(`$5.00 / 1M tokens`),
|
|
15020
15063
|
output: computeUsage(`$15.00 / 1M tokens`),
|
|
@@ -15026,6 +15069,7 @@
|
|
|
15026
15069
|
modelVariant: 'CHAT',
|
|
15027
15070
|
modelTitle: 'gpt-4o',
|
|
15028
15071
|
modelName: 'gpt-4o',
|
|
15072
|
+
modelDescription: "OpenAI's most advanced multimodal model optimized for performance, speed, and cost. Capable of vision, reasoning, and high quality text generation.",
|
|
15029
15073
|
pricing: {
|
|
15030
15074
|
prompt: computeUsage(`$5.00 / 1M tokens`),
|
|
15031
15075
|
output: computeUsage(`$15.00 / 1M tokens`),
|
|
@@ -15037,6 +15081,7 @@
|
|
|
15037
15081
|
modelVariant: 'CHAT',
|
|
15038
15082
|
modelTitle: 'gpt-4o-mini',
|
|
15039
15083
|
modelName: 'gpt-4o-mini',
|
|
15084
|
+
modelDescription: 'Smaller, more cost-effective version of GPT-4o with good performance across text, vision, and audio tasks at reduced complexity.',
|
|
15040
15085
|
pricing: {
|
|
15041
15086
|
prompt: computeUsage(`$3.00 / 1M tokens`),
|
|
15042
15087
|
output: computeUsage(`$9.00 / 1M tokens`),
|
|
@@ -15048,6 +15093,7 @@
|
|
|
15048
15093
|
modelVariant: 'CHAT',
|
|
15049
15094
|
modelTitle: 'o1-preview',
|
|
15050
15095
|
modelName: 'o1-preview',
|
|
15096
|
+
modelDescription: 'Advanced reasoning model with exceptional performance on complex logical, mathematical, and analytical tasks. Built for deep reasoning and specialized professional tasks.',
|
|
15051
15097
|
pricing: {
|
|
15052
15098
|
prompt: computeUsage(`$15.00 / 1M tokens`),
|
|
15053
15099
|
output: computeUsage(`$60.00 / 1M tokens`),
|
|
@@ -15059,6 +15105,7 @@
|
|
|
15059
15105
|
modelVariant: 'CHAT',
|
|
15060
15106
|
modelTitle: 'o1-preview-2024-09-12',
|
|
15061
15107
|
modelName: 'o1-preview-2024-09-12',
|
|
15108
|
+
modelDescription: 'September 2024 version of O1 preview with specialized reasoning capabilities for complex tasks requiring precise analytical thinking.',
|
|
15062
15109
|
// <- TODO: [💩] Some better system to organize theese date suffixes and versions
|
|
15063
15110
|
pricing: {
|
|
15064
15111
|
prompt: computeUsage(`$15.00 / 1M tokens`),
|
|
@@ -15071,6 +15118,7 @@
|
|
|
15071
15118
|
modelVariant: 'CHAT',
|
|
15072
15119
|
modelTitle: 'o1-mini',
|
|
15073
15120
|
modelName: 'o1-mini',
|
|
15121
|
+
modelDescription: 'Smaller, cost-effective version of the O1 model with good performance on reasoning tasks while maintaining efficiency for everyday analytical use.',
|
|
15074
15122
|
pricing: {
|
|
15075
15123
|
prompt: computeUsage(`$3.00 / 1M tokens`),
|
|
15076
15124
|
output: computeUsage(`$12.00 / 1M tokens`),
|
|
@@ -15082,10 +15130,10 @@
|
|
|
15082
15130
|
modelVariant: 'CHAT',
|
|
15083
15131
|
modelTitle: 'o1',
|
|
15084
15132
|
modelName: 'o1',
|
|
15133
|
+
modelDescription: "OpenAI's advanced reasoning model focused on logic and problem-solving. Designed for complex analytical tasks with rigorous step-by-step reasoning. 128K context window.",
|
|
15085
15134
|
pricing: {
|
|
15086
|
-
prompt: computeUsage(`$
|
|
15087
|
-
output: computeUsage(`$
|
|
15088
|
-
// <- TODO: !! Unsure, check the pricing
|
|
15135
|
+
prompt: computeUsage(`$15.00 / 1M tokens`),
|
|
15136
|
+
output: computeUsage(`$60.00 / 1M tokens`),
|
|
15089
15137
|
},
|
|
15090
15138
|
},
|
|
15091
15139
|
/**/
|
|
@@ -15094,6 +15142,7 @@
|
|
|
15094
15142
|
modelVariant: 'CHAT',
|
|
15095
15143
|
modelTitle: 'o3-mini',
|
|
15096
15144
|
modelName: 'o3-mini',
|
|
15145
|
+
modelDescription: 'Cost-effective reasoning model optimized for academic and scientific problem-solving. Efficient performance on STEM tasks with deep mathematical and scientific knowledge. 128K context window.',
|
|
15097
15146
|
pricing: {
|
|
15098
15147
|
prompt: computeUsage(`$3.00 / 1M tokens`),
|
|
15099
15148
|
output: computeUsage(`$12.00 / 1M tokens`),
|
|
@@ -15106,6 +15155,7 @@
|
|
|
15106
15155
|
modelVariant: 'CHAT',
|
|
15107
15156
|
modelTitle: 'o1-mini-2024-09-12',
|
|
15108
15157
|
modelName: 'o1-mini-2024-09-12',
|
|
15158
|
+
modelDescription: "September 2024 version of O1-mini with balanced reasoning capabilities and cost-efficiency. Good for analytical tasks that don't require the full O1 model.",
|
|
15109
15159
|
pricing: {
|
|
15110
15160
|
prompt: computeUsage(`$3.00 / 1M tokens`),
|
|
15111
15161
|
output: computeUsage(`$12.00 / 1M tokens`),
|
|
@@ -15117,6 +15167,7 @@
|
|
|
15117
15167
|
modelVariant: 'CHAT',
|
|
15118
15168
|
modelTitle: 'gpt-3.5-turbo-16k-0613',
|
|
15119
15169
|
modelName: 'gpt-3.5-turbo-16k-0613',
|
|
15170
|
+
modelDescription: 'June 2023 version of GPT-3.5 Turbo with extended 16k token context window for processing longer conversations and documents.',
|
|
15120
15171
|
pricing: {
|
|
15121
15172
|
prompt: computeUsage(`$3.00 / 1M tokens`),
|
|
15122
15173
|
output: computeUsage(`$4.00 / 1M tokens`),
|
|
@@ -15670,6 +15721,67 @@
|
|
|
15670
15721
|
};
|
|
15671
15722
|
}
|
|
15672
15723
|
|
|
15724
|
+
/**
|
|
15725
|
+
* List of available Deepseek models with descriptions
|
|
15726
|
+
*
|
|
15727
|
+
* Note: Done at 2025-04-22
|
|
15728
|
+
*
|
|
15729
|
+
* @see https://www.deepseek.com/models
|
|
15730
|
+
* @public exported from `@promptbook/deepseek`
|
|
15731
|
+
*/
|
|
15732
|
+
const DEEPSEEK_MODELS = exportJson({
|
|
15733
|
+
name: 'DEEPSEEK_MODELS',
|
|
15734
|
+
value: [
|
|
15735
|
+
{
|
|
15736
|
+
modelVariant: 'CHAT',
|
|
15737
|
+
modelTitle: 'Deepseek Chat',
|
|
15738
|
+
modelName: 'deepseek-chat',
|
|
15739
|
+
modelDescription: 'General-purpose language model with strong performance across conversation, reasoning, and content generation. 128K context window with excellent instruction following capabilities.',
|
|
15740
|
+
pricing: {
|
|
15741
|
+
prompt: computeUsage(`$1.00 / 1M tokens`),
|
|
15742
|
+
output: computeUsage(`$2.00 / 1M tokens`),
|
|
15743
|
+
},
|
|
15744
|
+
},
|
|
15745
|
+
{
|
|
15746
|
+
modelVariant: 'CHAT',
|
|
15747
|
+
modelTitle: 'Deepseek Reasoner',
|
|
15748
|
+
modelName: 'deepseek-reasoner',
|
|
15749
|
+
modelDescription: 'Specialized model focused on complex reasoning tasks like mathematical problem-solving and logical analysis. Enhanced step-by-step reasoning with explicit chain-of-thought processes. 128K context window.',
|
|
15750
|
+
pricing: {
|
|
15751
|
+
prompt: computeUsage(`$4.00 / 1M tokens`),
|
|
15752
|
+
output: computeUsage(`$8.00 / 1M tokens`),
|
|
15753
|
+
},
|
|
15754
|
+
},
|
|
15755
|
+
{
|
|
15756
|
+
modelVariant: 'CHAT',
|
|
15757
|
+
modelTitle: 'DeepSeek V3',
|
|
15758
|
+
modelName: 'deepseek-v3-0324',
|
|
15759
|
+
modelDescription: 'Advanced general-purpose model with improved reasoning, coding abilities, and multimodal understanding. Built on the latest DeepSeek architecture with enhanced knowledge representation.',
|
|
15760
|
+
pricing: {
|
|
15761
|
+
prompt: computeUsage(`$1.50 / 1M tokens`),
|
|
15762
|
+
output: computeUsage(`$3.00 / 1M tokens`),
|
|
15763
|
+
},
|
|
15764
|
+
},
|
|
15765
|
+
{
|
|
15766
|
+
modelVariant: 'CHAT',
|
|
15767
|
+
modelTitle: 'DeepSeek R1',
|
|
15768
|
+
modelName: 'deepseek-r1',
|
|
15769
|
+
modelDescription: 'Research-focused model optimized for scientific problem-solving and analytical tasks. Excellent performance on tasks requiring domain-specific expertise and critical thinking.',
|
|
15770
|
+
pricing: {
|
|
15771
|
+
prompt: computeUsage(`$5.00 / 1M tokens`),
|
|
15772
|
+
output: computeUsage(`$10.00 / 1M tokens`),
|
|
15773
|
+
},
|
|
15774
|
+
},
|
|
15775
|
+
// <- [🕕]
|
|
15776
|
+
],
|
|
15777
|
+
});
|
|
15778
|
+
/**
|
|
15779
|
+
* TODO: [🧠] Add information about context window sizes, capabilities, and relative performance characteristics
|
|
15780
|
+
* TODO: [🎰] Some mechanism to auto-update available models
|
|
15781
|
+
* TODO: [🧠] Verify pricing information is current with Deepseek's official documentation
|
|
15782
|
+
* Note: [💞] Ignore a discrepancy between file name and entity name
|
|
15783
|
+
*/
|
|
15784
|
+
|
|
15673
15785
|
/**
|
|
15674
15786
|
* Execution Tools for calling Deepseek API.
|
|
15675
15787
|
*
|
|
@@ -15691,18 +15803,7 @@
|
|
|
15691
15803
|
title: 'Deepseek',
|
|
15692
15804
|
description: 'Implementation of Deepseek models',
|
|
15693
15805
|
vercelProvider: deepseekVercelProvider,
|
|
15694
|
-
availableModels:
|
|
15695
|
-
{
|
|
15696
|
-
modelName: 'deepseek-chat',
|
|
15697
|
-
modelVariant: 'CHAT',
|
|
15698
|
-
},
|
|
15699
|
-
{
|
|
15700
|
-
modelName: 'deepseek-reasoner',
|
|
15701
|
-
modelVariant: 'CHAT',
|
|
15702
|
-
},
|
|
15703
|
-
// <- [🕕]
|
|
15704
|
-
// <- TODO: How picking of the default model looks like in `createExecutionToolsFromVercelProvider`
|
|
15705
|
-
],
|
|
15806
|
+
availableModels: DEEPSEEK_MODELS,
|
|
15706
15807
|
...options,
|
|
15707
15808
|
});
|
|
15708
15809
|
}, {
|
|
@@ -15777,6 +15878,173 @@
      * Note: [💞] Ignore a discrepancy between file name and entity name
      */
 
+    /**
+     * List of available Google models with descriptions
+     *
+     * Note: Done at 2025-04-22
+     *
+     * @see https://ai.google.dev/models/gemini
+     * @public exported from `@promptbook/google`
+     */
+    const GOOGLE_MODELS = exportJson({
+        name: 'GOOGLE_MODELS',
+        value: [
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 2.5 Pro',
+                modelName: 'gemini-2.5-pro-preview-03-25',
+                modelDescription: 'Latest advanced multimodal model with exceptional reasoning, tool use, and instruction following. 1M token context window with improved vision capabilities for complex visual tasks.',
+                pricing: {
+                    prompt: computeUsage(`$7.00 / 1M tokens`),
+                    output: computeUsage(`$21.00 / 1M tokens`),
+                },
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 2.0 Flash',
+                modelName: 'gemini-2.0-flash',
+                modelDescription: 'Fast, efficient model optimized for rapid response times. Good balance between performance and cost, with strong capabilities across text, code, and reasoning tasks. 128K context window.',
+                pricing: {
+                    prompt: computeUsage(`$0.35 / 1M tokens`),
+                    output: computeUsage(`$1.05 / 1M tokens`),
+                },
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 2.0 Flash Lite',
+                modelName: 'gemini-2.0-flash-lite',
+                modelDescription: 'Streamlined version of Gemini 2.0 Flash, designed for extremely low-latency applications and edge deployments. Optimized for efficiency while maintaining core capabilities.',
+                pricing: {
+                    prompt: computeUsage(`$0.20 / 1M tokens`),
+                    output: computeUsage(`$0.60 / 1M tokens`),
+                },
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 2.0 Flash Thinking',
+                modelName: 'gemini-2.0-flash-thinking-exp-01-21',
+                modelDescription: 'Experimental model focused on enhanced reasoning with explicit chain-of-thought processes. Designed for tasks requiring structured thinking and problem-solving approaches.',
+                pricing: {
+                    prompt: computeUsage(`$0.35 / 1M tokens`),
+                    output: computeUsage(`$1.05 / 1M tokens`),
+                },
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Flash',
+                modelName: 'gemini-1.5-flash',
+                modelDescription: 'Efficient model balancing speed and quality for general-purpose applications. 1M token context window with good multimodal capabilities and quick response times.',
+                pricing: {
+                    prompt: computeUsage(`$0.35 / 1M tokens`),
+                    output: computeUsage(`$1.05 / 1M tokens`),
+                },
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Flash Latest',
+                modelName: 'gemini-1.5-flash-latest',
+                modelDescription: 'Points to the latest version of Gemini 1.5 Flash, ensuring access to the most recent improvements and bug fixes while maintaining stable interfaces.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Flash 001',
+                modelName: 'gemini-1.5-flash-001',
+                modelDescription: 'First stable release of Gemini 1.5 Flash model with reliable performance characteristics for production applications. 1M token context window.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Flash 002',
+                modelName: 'gemini-1.5-flash-002',
+                modelDescription: 'Improved version of Gemini 1.5 Flash with enhanced instruction following and more consistent outputs. Refined for better application integration.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Flash Exp',
+                modelName: 'gemini-1.5-flash-exp-0827',
+                modelDescription: 'Experimental version of Gemini 1.5 Flash with new capabilities being tested. May offer improved performance but with potential behavior differences from stable releases.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Flash 8B',
+                modelName: 'gemini-1.5-flash-8b',
+                modelDescription: 'Compact 8B parameter model optimized for efficiency and deployment in resource-constrained environments. Good performance despite smaller size.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Flash 8B Latest',
+                modelName: 'gemini-1.5-flash-8b-latest',
+                modelDescription: 'Points to the most recent version of the compact 8B parameter model, providing latest improvements while maintaining a small footprint.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Flash 8B Exp',
+                modelName: 'gemini-1.5-flash-8b-exp-0924',
+                modelDescription: 'Experimental version of the 8B parameter model with new capabilities and optimizations being evaluated for future stable releases.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Flash 8B Exp',
+                modelName: 'gemini-1.5-flash-8b-exp-0827',
+                modelDescription: 'August experimental release of the efficient 8B parameter model with specific improvements to reasoning capabilities and response quality.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Pro Latest',
+                modelName: 'gemini-1.5-pro-latest',
+                modelDescription: 'Points to the most recent version of the flagship Gemini 1.5 Pro model, ensuring access to the latest capabilities and improvements.',
+                pricing: {
+                    prompt: computeUsage(`$7.00 / 1M tokens`),
+                    output: computeUsage(`$21.00 / 1M tokens`),
+                },
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Pro',
+                modelName: 'gemini-1.5-pro',
+                modelDescription: 'Flagship multimodal model with strong performance across text, code, vision, and audio tasks. 1M token context window with excellent reasoning capabilities.',
+                pricing: {
+                    prompt: computeUsage(`$7.00 / 1M tokens`),
+                    output: computeUsage(`$21.00 / 1M tokens`),
+                },
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Pro 001',
+                modelName: 'gemini-1.5-pro-001',
+                modelDescription: 'First stable release of Gemini 1.5 Pro with consistent performance characteristics and reliable behavior for production applications.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Pro 002',
+                modelName: 'gemini-1.5-pro-002',
+                modelDescription: 'Refined version of Gemini 1.5 Pro with improved instruction following, better multimodal understanding, and more consistent outputs.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.5 Pro Exp',
+                modelName: 'gemini-1.5-pro-exp-0827',
+                modelDescription: 'Experimental version of Gemini 1.5 Pro with new capabilities and optimizations being tested before wider release. May offer improved performance.',
+            },
+            {
+                modelVariant: 'CHAT',
+                modelTitle: 'Gemini 1.0 Pro',
+                modelName: 'gemini-1.0-pro',
+                modelDescription: 'Original Gemini series foundation model with solid multimodal capabilities. 32K context window with good performance on text, code, and basic vision tasks.',
+                pricing: {
+                    prompt: computeUsage(`$0.35 / 1M tokens`),
+                    output: computeUsage(`$1.05 / 1M tokens`),
+                },
+            },
+            // <- [🕕]
+        ],
+    });
+    /**
+     * TODO: [🧠] Add information about context window sizes, capabilities, and relative performance characteristics
+     * TODO: [🎰] Some mechanism to auto-update available models
+     * TODO: [🧠] Verify pricing information is current with Google's official documentation
+     * Note: [💞] Ignore a discrepancy between file name and entity name
+     */
+
     /**
      * Execution Tools for calling Google Gemini API.
      *
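`GOOGLE_MODELS` mixes fully priced entries with alias entries (for example `gemini-1.5-flash-latest`) that carry no `pricing` block. The sketch below shows one way a caller might pick an entry by name and fall back to a priced model; the `ModelEntry` type, the `pickModel` helper, and the trimmed descriptions are assumptions for illustration, not exports of `@promptbook/google`.

```typescript
// Illustrative only — mirrors the entry shape visible in the diff above.
type ModelEntry = {
    modelVariant: 'CHAT';
    modelTitle: string;
    modelName: string;
    modelDescription: string;
    pricing?: { prompt: number; output: number };
};

// A tiny local subset of GOOGLE_MODELS, assuming `exportJson` hands back the `value` array unchanged.
const googleModels: ModelEntry[] = [
    {
        modelVariant: 'CHAT',
        modelTitle: 'Gemini 2.0 Flash',
        modelName: 'gemini-2.0-flash',
        modelDescription: 'Fast, efficient model optimized for rapid response times.',
        pricing: { prompt: 0.35 / 1_000_000, output: 1.05 / 1_000_000 },
    },
    {
        modelVariant: 'CHAT',
        modelTitle: 'Gemini 1.5 Flash Latest',
        modelName: 'gemini-1.5-flash-latest',
        modelDescription: 'Points to the latest version of Gemini 1.5 Flash.',
        // Alias entries in the diff carry no pricing block.
    },
];

// Pick an entry by exact model name, falling back to the first priced model.
function pickModel(models: ModelEntry[], preferredName: string): ModelEntry {
    return (
        models.find(({ modelName }) => modelName === preferredName) ??
        models.find(({ pricing }) => pricing !== undefined) ??
        models[0]
    );
}

console.log(pickModel(googleModels, 'gemini-2.0-flash').modelTitle); // "Gemini 2.0 Flash"
```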
@@ -15798,29 +16066,7 @@
         title: 'Google',
         description: 'Implementation of Google models',
         vercelProvider: googleGeminiVercelProvider,
-        availableModels: [
-            // TODO: [🕘] Maybe list models in same way as in other providers - in separate file with metadata
-            'gemini-2.5-pro-preview-03-25',
-            'gemini-2.0-flash',
-            'gemini-2.0-flash-lite',
-            'gemini-2.0-flash-thinking-exp-01-21',
-            'gemini-1.5-flash',
-            'gemini-1.5-flash-latest',
-            'gemini-1.5-flash-001',
-            'gemini-1.5-flash-002',
-            'gemini-1.5-flash-exp-0827',
-            'gemini-1.5-flash-8b',
-            'gemini-1.5-flash-8b-latest',
-            'gemini-1.5-flash-8b-exp-0924',
-            'gemini-1.5-flash-8b-exp-0827',
-            'gemini-1.5-pro-latest',
-            'gemini-1.5-pro',
-            'gemini-1.5-pro-001',
-            'gemini-1.5-pro-002',
-            'gemini-1.5-pro-exp-0827',
-            'gemini-1.0-pro',
-            // <- [🕕]
-        ].map((modelName) => ({ modelName, modelVariant: 'CHAT' })),
+        availableModels: GOOGLE_MODELS,
         ...options,
     });
 }, {
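This hunk swaps the inline list of bare Gemini model names (each mapped to `{ modelName, modelVariant: 'CHAT' }`) for the metadata-rich `GOOGLE_MODELS` constant defined above. The sketch below restates that relationship; `MinimalEntry`, `RichEntry`, and `toMinimalEntries` are illustrative names, not part of the package API.

```typescript
// Sketch of how this hunk changes the shape of `availableModels`.
type MinimalEntry = { modelName: string; modelVariant: 'CHAT' };
type RichEntry = MinimalEntry & { modelTitle?: string; modelDescription?: string; pricing?: unknown };

// Before: bare model names mapped into minimal `{ modelName, modelVariant }` entries.
const before: MinimalEntry[] = ['gemini-2.0-flash', 'gemini-1.5-pro'].map(
    (modelName): MinimalEntry => ({ modelName, modelVariant: 'CHAT' }),
);

// After: the provider receives richer entries (GOOGLE_MODELS); the old minimal shape
// is still recoverable by projecting out the two fields the provider used before.
function toMinimalEntries(models: ReadonlyArray<RichEntry>): MinimalEntry[] {
    return models.map(({ modelName, modelVariant }) => ({ modelName, modelVariant }));
}

console.log(before.length); // 2
console.log(toMinimalEntries([{ modelName: 'gemini-2.0-flash', modelVariant: 'CHAT', modelTitle: 'Gemini 2.0 Flash' }]));
// -> [ { modelName: 'gemini-2.0-flash', modelVariant: 'CHAT' } ]
```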