@promptbook/node 0.61.0-15 → 0.61.0-17
This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- package/esm/index.es.js +1945 -616
- package/esm/index.es.js.map +1 -1
- package/esm/typings/promptbook-collection/index.d.ts +12 -15
- package/esm/typings/src/_packages/core.index.d.ts +2 -1
- package/esm/typings/src/_packages/node.index.d.ts +2 -1
- package/esm/typings/src/_packages/types.index.d.ts +2 -3
- package/esm/typings/src/_packages/utils.index.d.ts +4 -3
- package/esm/typings/src/commands/KNOWLEDGE/KnowledgeCommand.d.ts +2 -3
- package/esm/typings/src/commands/_common/types/CommandParser.d.ts +28 -3
- package/esm/typings/src/config.d.ts +26 -1
- package/esm/typings/src/config.test.d.ts +4 -0
- package/esm/typings/src/conversion/utils/extractParametersFromPromptTemplate.d.ts +2 -2
- package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
- package/esm/typings/src/conversion/validation/validatePipeline.d.ts +3 -0
- package/esm/typings/src/execution/PipelineExecutor.d.ts +5 -5
- package/esm/typings/src/execution/PromptResultUsage.d.ts +3 -3
- package/esm/typings/src/execution/ScriptExecutionTools.d.ts +2 -3
- package/esm/typings/src/execution/createPipelineExecutor.d.ts +16 -3
- package/esm/typings/src/formats/_common/FormatDefinition.d.ts +1 -1
- package/esm/typings/src/knowledge/prepare-knowledge/_common/Scraper.d.ts +27 -0
- package/esm/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.d.ts +1 -1
- package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
- package/esm/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
- package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
- package/esm/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -2
- package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +3 -3
- package/esm/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
- package/esm/typings/src/prepare/isPipelinePrepared.d.ts +10 -0
- package/esm/typings/src/prepare/preparePipeline.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +2 -1
- package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PromptDialogJson.d.ts +3 -0
- package/esm/typings/src/types/PipelineJson/PromptTemplateJson.d.ts +4 -0
- package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +5 -3
- package/esm/typings/src/types/Prompt.d.ts +7 -10
- package/esm/typings/src/types/typeAliases.d.ts +44 -4
- package/esm/typings/src/utils/deepFreeze.d.ts +10 -1
- package/esm/typings/src/utils/extractParameters.d.ts +2 -2
- package/esm/typings/src/{execution/utils → utils}/replaceParameters.d.ts +2 -4
- package/esm/typings/src/utils/sets/difference.d.ts +3 -0
- package/package.json +4 -2
- package/umd/index.umd.js +1949 -619
- package/umd/index.umd.js.map +1 -1
- package/umd/typings/promptbook-collection/index.d.ts +12 -15
- package/umd/typings/src/_packages/core.index.d.ts +2 -1
- package/umd/typings/src/_packages/node.index.d.ts +2 -1
- package/umd/typings/src/_packages/types.index.d.ts +2 -3
- package/umd/typings/src/_packages/utils.index.d.ts +4 -3
- package/umd/typings/src/commands/KNOWLEDGE/KnowledgeCommand.d.ts +2 -3
- package/umd/typings/src/commands/_common/types/CommandParser.d.ts +28 -3
- package/umd/typings/src/config.d.ts +26 -1
- package/umd/typings/src/config.test.d.ts +4 -0
- package/umd/typings/src/conversion/utils/extractParametersFromPromptTemplate.d.ts +2 -2
- package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -1
- package/umd/typings/src/conversion/validation/validatePipeline.d.ts +3 -0
- package/umd/typings/src/execution/PipelineExecutor.d.ts +5 -5
- package/umd/typings/src/execution/PromptResultUsage.d.ts +3 -3
- package/umd/typings/src/execution/ScriptExecutionTools.d.ts +2 -3
- package/umd/typings/src/execution/createPipelineExecutor.d.ts +16 -3
- package/umd/typings/src/formats/_common/FormatDefinition.d.ts +1 -1
- package/umd/typings/src/knowledge/prepare-knowledge/_common/Scraper.d.ts +27 -0
- package/umd/typings/src/knowledge/prepare-knowledge/_common/prepareKnowledgePieces.d.ts +1 -1
- package/umd/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
- package/umd/typings/src/knowledge/prepare-knowledge/pdf/prepareKnowledgeFromPdf.d.ts +1 -1
- package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -1
- package/umd/typings/src/llm-providers/azure-openai/AzureOpenAiExecutionTools.d.ts +2 -2
- package/umd/typings/src/llm-providers/mocked/MockedEchoLlmExecutionTools.d.ts +2 -2
- package/umd/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +3 -3
- package/umd/typings/src/llm-providers/openai/OpenAiExecutionTools.d.ts +3 -3
- package/umd/typings/src/prepare/isPipelinePrepared.d.ts +10 -0
- package/umd/typings/src/prepare/isPipelinePrepared.test.d.ts +1 -0
- package/umd/typings/src/prepare/preparePipeline.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +2 -1
- package/umd/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/PromptDialogJson.d.ts +3 -0
- package/umd/typings/src/types/PipelineJson/PromptTemplateJson.d.ts +4 -0
- package/umd/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +5 -3
- package/umd/typings/src/types/Prompt.d.ts +7 -10
- package/umd/typings/src/types/typeAliases.d.ts +44 -4
- package/umd/typings/src/utils/deepFreeze.d.ts +10 -1
- package/umd/typings/src/utils/extractParameters.d.ts +2 -2
- package/umd/typings/src/{execution/utils → utils}/replaceParameters.d.ts +2 -4
- package/umd/typings/src/utils/replaceParameters.test.d.ts +1 -0
- package/umd/typings/src/utils/sets/difference.d.ts +3 -0
- package/esm/typings/src/types/Parameters.d.ts +0 -14
- package/umd/typings/src/types/Parameters.d.ts +0 -14
- /package/esm/typings/src/{execution/utils/replaceParameters.test.d.ts → prepare/isPipelinePrepared.test.d.ts} +0 -0
- /package/{umd/typings/src/execution → esm/typings/src}/utils/replaceParameters.test.d.ts +0 -0
package/esm/index.es.js
CHANGED

@@ -4,6 +4,8 @@ import { join } from 'path';
 import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
 import { format } from 'prettier';
 import parserHtml from 'prettier/parser-html';
+import Anthropic from '@anthropic-ai/sdk';
+import OpenAI from 'openai';
 
 /*! *****************************************************************************
 Copyright (c) Microsoft Corporation.
@@ -129,7 +131,7 @@ function __spreadArray(to, from, pack) {
 *
 * @returns The same object as the input, but deeply frozen
 *
-* Note: This function mutates the object
+* Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
 */
 function deepFreeze(objectValue) {
 var e_1, _a;
@@ -152,6 +154,17 @@ function deepFreeze(objectValue) {
 }
 return Object.freeze(objectValue);
 }
+/**
+* @@@
+* @@@
+*
+* @returns The same object as the input, but deeply frozen
+*
+* Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
+*/
+function deepFreezeWithSameType(objectValue) {
+return deepFreeze(objectValue);
+}
 /**
 * TODO: [๐ผ] Export from `@promptbook/utils`
 * TODO: [๐ง ] Is there a way how to meaningfully test this utility
@@ -165,6 +178,10 @@ var LOOP_LIMIT = 1000;
 * The maximum number of (LLM) tasks running in parallel
 */
 var MAX_PARALLEL_COUNT = 5;
+/**
+* The maximum number of attempts to execute LLM task before giving up
+*/
+var MAX_EXECUTION_ATTEMPTS = 3;
 /**
 * The name of the builded pipeline collection made by CLI `ptbk make` and for lookup in `createCollectionFromDirectory`
 */
@@ -174,6 +191,7 @@ var PIPELINE_COLLECTION_BASE_FILENAME = "index";
 */
 var RESERVED_PARAMETER_NAMES = deepFreeze([
 'context',
+'currentDate',
 // <- TODO: Add more like 'date', 'modelName',...
 // <- TODO: Add [emoji] + instructions ACRY when adding new reserved parameter
 ]);
@@ -181,6 +199,9 @@ var RESERVED_PARAMETER_NAMES = deepFreeze([
 TODO: !!! Just testing false-negative detection of [๐ก][๐ข][๐ต][โช] leak
 */
 // [๐ก][๐ข][๐ต][โช]
+/**
+* TODO: [๐ผ] Export all to core
+*/
 
 /**
 * @@@
@@ -374,7 +395,7 @@ function forEachAsync(array, options, callbackfunction) {
|
|
|
374
395
|
});
|
|
375
396
|
}
|
|
376
397
|
|
|
377
|
-
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-
|
|
398
|
+
var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should 
be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-16",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-16",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
|
|
378
399
|
|
|
379
400
|
/**
|
|
380
401
|
* Prettify the html code
|
|
@@ -480,7 +501,7 @@ function pipelineJsonToString(pipelineJson) {
 /* Note: Not using:> name, */
 title_1 = promptTemplate.title, description_1 = promptTemplate.description,
 /* Note: dependentParameterNames, */
-jokers = promptTemplate.
+jokers = promptTemplate.jokerParameterNames, blockType = promptTemplate.blockType, content = promptTemplate.content, postprocessing = promptTemplate.postprocessingFunctionNames, expectations = promptTemplate.expectations, expectFormat = promptTemplate.expectFormat, resultingParameterName = promptTemplate.resultingParameterName;
 pipelineString += '\n\n';
 pipelineString += "## ".concat(title_1);
 if (description_1) {
@@ -820,7 +841,7 @@ function isValidPipelineUrl(url) {
 */
 function validatePipeline(pipeline) {
 // TODO: [๐ง ] Maybe test if promptbook is a promise and make specific error case for that
-var e_1, _a, e_2, _b, e_3, _c, e_4, _d;
+var e_1, _a, e_2, _b, e_3, _c, e_4, _d, e_5, _e;
 if (pipeline.pipelineUrl !== undefined && !isValidPipelineUrl(pipeline.pipelineUrl)) {
 // <- Note: [๐ฒ]
 throw new PipelineLogicError("Invalid promptbook URL \"".concat(pipeline.pipelineUrl, "\""));
@@ -857,19 +878,19 @@ function validatePipeline(pipeline) {
 };
 try {
 // Note: Check each parameter individually
-for (var
-var parameter =
+for (var _f = __values(pipeline.parameters), _g = _f.next(); !_g.done; _g = _f.next()) {
+var parameter = _g.value;
 _loop_1(parameter);
 }
 }
 catch (e_1_1) { e_1 = { error: e_1_1 }; }
 finally {
 try {
-if (
+if (_g && !_g.done && (_a = _f.return)) _a.call(_f);
 }
 finally { if (e_1) throw e_1.error; }
 }
-// Note:
+// Note: All input parameters are defined - so that they can be used as result of some template
 var definedParameters = new Set(pipeline.parameters.filter(function (_a) {
 var isInput = _a.isInput;
 return isInput;
@@ -878,23 +899,27 @@ function validatePipeline(pipeline) {
 return name;
 }));
 try {
-
-
+// Note: Checking each template individually
+for (var _h = __values(pipeline.promptTemplates), _j = _h.next(); !_j.done; _j = _h.next()) {
+var template = _j.value;
 if (definedParameters.has(template.resultingParameterName)) {
 throw new PipelineLogicError("Parameter {".concat(template.resultingParameterName, "} is defined multiple times"));
 }
+if (RESERVED_PARAMETER_NAMES.includes(template.resultingParameterName)) {
+throw new PipelineLogicError("Parameter name {".concat(template.resultingParameterName, "} is reserved, please use fifferent name"));
+}
 definedParameters.add(template.resultingParameterName);
 if (template.blockType === 'PROMPT_TEMPLATE' && template.modelRequirements.modelVariant === undefined) {
 throw new PipelineLogicError(spaceTrim$1("\n\n You must specify MODEL VARIANT in the prompt template \"".concat(template.title, "\"\n\n For example:\n - MODEL VARIANT Chat\n - MODEL NAME `gpt-4-1106-preview`\n\n ")));
 }
-if (template.
+if (template.jokerParameterNames && template.jokerParameterNames.length > 0) {
 if (!template.expectFormat &&
 !template.expectations /* <- TODO: Require at least 1 -> min <- expectation to use jokers */) {
 throw new PipelineLogicError("Joker parameters are used for {".concat(template.resultingParameterName, "} but no expectations are defined"));
 }
 try {
-for (var
-var joker =
+for (var _k = (e_3 = void 0, __values(template.jokerParameterNames)), _l = _k.next(); !_l.done; _l = _k.next()) {
+var joker = _l.value;
 if (!template.dependentParameterNames.includes(joker)) {
 throw new PipelineLogicError("Parameter {".concat(joker, "} is used for {").concat(template.resultingParameterName, "} as joker but not in dependentParameterNames"));
 }
@@ -903,15 +928,15 @@ function validatePipeline(pipeline) {
 catch (e_3_1) { e_3 = { error: e_3_1 }; }
 finally {
 try {
-if (
+if (_l && !_l.done && (_c = _k.return)) _c.call(_k);
 }
 finally { if (e_3) throw e_3.error; }
 }
 }
 if (template.expectations) {
 try {
-for (var
-var
+for (var _m = (e_4 = void 0, __values(Object.entries(template.expectations))), _o = _m.next(); !_o.done; _o = _m.next()) {
+var _p = __read(_o.value, 2), unit = _p[0], _q = _p[1], min = _q.min, max = _q.max;
 if (min !== undefined && max !== undefined && min > max) {
 throw new PipelineLogicError("Min expectation (=".concat(min, ") of ").concat(unit, " is higher than max expectation (=").concat(max, ")"));
 }
@@ -926,7 +951,7 @@ function validatePipeline(pipeline) {
 catch (e_4_1) { e_4 = { error: e_4_1 }; }
 finally {
 try {
-if (
+if (_o && !_o.done && (_d = _m.return)) _d.call(_m);
 }
 finally { if (e_4) throw e_4.error; }
 }
@@ -936,7 +961,7 @@ function validatePipeline(pipeline) {
 catch (e_2_1) { e_2 = { error: e_2_1 }; }
 finally {
 try {
-if (
+if (_j && !_j.done && (_b = _h.return)) _b.call(_h);
 }
 finally { if (e_2) throw e_2.error; }
 }
@@ -950,7 +975,22 @@ function validatePipeline(pipeline) {
 var name = _a.name;
 return name;
 });
+try {
+// Note: All reserved parameters are resolved
+for (var RESERVED_PARAMETER_NAMES_1 = __values(RESERVED_PARAMETER_NAMES), RESERVED_PARAMETER_NAMES_1_1 = RESERVED_PARAMETER_NAMES_1.next(); !RESERVED_PARAMETER_NAMES_1_1.done; RESERVED_PARAMETER_NAMES_1_1 = RESERVED_PARAMETER_NAMES_1.next()) {
+var reservedParameterName = RESERVED_PARAMETER_NAMES_1_1.value;
+resovedParameters = __spreadArray(__spreadArray([], __read(resovedParameters), false), [reservedParameterName], false);
+}
+}
+catch (e_5_1) { e_5 = { error: e_5_1 }; }
+finally {
+try {
+if (RESERVED_PARAMETER_NAMES_1_1 && !RESERVED_PARAMETER_NAMES_1_1.done && (_e = RESERVED_PARAMETER_NAMES_1.return)) _e.call(RESERVED_PARAMETER_NAMES_1);
+}
+finally { if (e_5) throw e_5.error; }
+}
 var unresovedTemplates = __spreadArray([], __read(pipeline.promptTemplates), false);
+// <- TODO: [๐ง ][๐ฅ]
 var loopLimit = LOOP_LIMIT;
 var _loop_2 = function () {
 if (loopLimit-- < 0) {
@@ -964,11 +1004,11 @@ function validatePipeline(pipeline) {
 throw new PipelineLogicError(spaceTrim$1(function (block) { return "\n\n Can not resolve some parameters:\n Either you are using a parameter that is not defined, or there are some circular dependencies.\n\n Can not resolve:\n ".concat(block(unresovedTemplates
 .map(function (_a) {
 var resultingParameterName = _a.resultingParameterName, dependentParameterNames = _a.dependentParameterNames;
-return "- {".concat(resultingParameterName, "} depends on ").concat(dependentParameterNames
+return "- Parameter {".concat(resultingParameterName, "} which depends on ").concat(dependentParameterNames
 .map(function (dependentParameterName) { return "{".concat(dependentParameterName, "}"); })
-.join('
+.join(' and '));
 })
-.join('\n')), "\n\n Resolved:\n ").concat(block(resovedParameters.map(function (name) { return "- {".concat(name, "}"); }).join('\n')), "\n "); }));
+.join('\n')), "\n\n Resolved:\n ").concat(block(resovedParameters.map(function (name) { return "- Parameter {".concat(name, "}"); }).join('\n')), "\n "); }));
 }
 resovedParameters = __spreadArray(__spreadArray([], __read(resovedParameters), false), __read(currentlyResovedTemplates.map(function (_a) {
 var resultingParameterName = _a.resultingParameterName;
@@ -995,6 +1035,9 @@ function validatePipeline(pipeline) {
 /**
 * TODO: [๐ง ][๐ฃ] !!!! Validate that all samples match expectations
 * TODO: [๐ง ][๐ฃ] !!!! Validate that knowledge is valid (non-void)
+* TODO: [๐ง ][๐ฃ] !!!! Validate that persona can be used only with CHAT variant
+* TODO: !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
+* TODO: !!!! Validate that reserved parameter is not used as joker
 * TODO: [๐ง ] !!! Validationg not only logic itself but imports around - files and websites and rerefenced pipelines exists
 * TODO: [๐ ] Actions, instruments (and maybe knowledge) => Functions and tools
 */
@@ -1503,6 +1546,212 @@ function assertsExecutionSuccessful(executionResult) {
|
|
|
1503
1546
|
* TODO: [๐ง ] Can this return type be better typed than void
|
|
1504
1547
|
*/
|
|
1505
1548
|
|
|
1549
|
+
/**
|
|
1550
|
+
* Create difference set of two sets.
|
|
1551
|
+
*
|
|
1552
|
+
* @deprecated use new javascript set methods instead @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set
|
|
1553
|
+
*/
|
|
1554
|
+
function difference(a, b, isEqual) {
|
|
1555
|
+
var e_1, _a;
|
|
1556
|
+
if (isEqual === void 0) { isEqual = function (a, b) { return a === b; }; }
|
|
1557
|
+
var diff = new Set();
|
|
1558
|
+
var _loop_1 = function (itemA) {
|
|
1559
|
+
if (!Array.from(b).some(function (itemB) { return isEqual(itemA, itemB); })) {
|
|
1560
|
+
diff.add(itemA);
|
|
1561
|
+
}
|
|
1562
|
+
};
|
|
1563
|
+
try {
|
|
1564
|
+
for (var _b = __values(Array.from(a)), _c = _b.next(); !_c.done; _c = _b.next()) {
|
|
1565
|
+
var itemA = _c.value;
|
|
1566
|
+
_loop_1(itemA);
|
|
1567
|
+
}
|
|
1568
|
+
}
|
|
1569
|
+
catch (e_1_1) { e_1 = { error: e_1_1 }; }
|
|
1570
|
+
finally {
|
|
1571
|
+
try {
|
|
1572
|
+
if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
|
|
1573
|
+
}
|
|
1574
|
+
finally { if (e_1) throw e_1.error; }
|
|
1575
|
+
}
|
|
1576
|
+
return diff;
|
|
1577
|
+
}
|
|
1578
|
+
/**
|
|
1579
|
+
* TODO: [๐ง ][๐ฏ] Maybe also implement symmetricDifference
|
|
1580
|
+
*/
|
|
1581
|
+
|
|
1582
|
+
/**
|
|
1583
|
+
* Parses the template and returns the list of all parameter names
|
|
1584
|
+
*
|
|
1585
|
+
* @param template the template with parameters in {curly} braces
|
|
1586
|
+
* @returns the list of parameter names
|
|
1587
|
+
*/
|
|
1588
|
+
function extractParameters(template) {
|
|
1589
|
+
var e_1, _a;
|
|
1590
|
+
var matches = template.matchAll(/{\w+}/g);
|
|
1591
|
+
var parameterNames = new Set();
|
|
1592
|
+
try {
|
|
1593
|
+
for (var matches_1 = __values(matches), matches_1_1 = matches_1.next(); !matches_1_1.done; matches_1_1 = matches_1.next()) {
|
|
1594
|
+
var match = matches_1_1.value;
|
|
1595
|
+
var parameterName = match[0].slice(1, -1);
|
|
1596
|
+
parameterNames.add(parameterName);
|
|
1597
|
+
}
|
|
1598
|
+
}
|
|
1599
|
+
catch (e_1_1) { e_1 = { error: e_1_1 }; }
|
|
1600
|
+
finally {
|
|
1601
|
+
try {
|
|
1602
|
+
if (matches_1_1 && !matches_1_1.done && (_a = matches_1.return)) _a.call(matches_1);
|
|
1603
|
+
}
|
|
1604
|
+
finally { if (e_1) throw e_1.error; }
|
|
1605
|
+
}
|
|
1606
|
+
return parameterNames;
|
|
1607
|
+
}
|
|
1608
|
+
|
|
1609
|
+
/**
|
|
1610
|
+
* Parses the given script and returns the list of all used variables that are not defined in the script
|
|
1611
|
+
*
|
|
1612
|
+
* @param script from which to extract the variables
|
|
1613
|
+
* @returns the list of variable names
|
|
1614
|
+
* @throws {ParsingError} if the script is invalid
|
|
1615
|
+
*/
|
|
1616
|
+
function extractVariables(script) {
|
|
1617
|
+
var variables = new Set();
|
|
1618
|
+
script = "(()=>{".concat(script, "})()");
|
|
1619
|
+
try {
|
|
1620
|
+
for (var i = 0; i < 100 /* <- TODO: This limit to configuration */; i++)
|
|
1621
|
+
try {
|
|
1622
|
+
eval(script);
|
|
1623
|
+
}
|
|
1624
|
+
catch (error) {
|
|
1625
|
+
if (!(error instanceof ReferenceError)) {
|
|
1626
|
+
throw error;
|
|
1627
|
+
}
|
|
1628
|
+
var undefinedName = error.message.split(' ')[0];
|
|
1629
|
+
/*
|
|
1630
|
+
Note: Parsing the error
|
|
1631
|
+
[ReferenceError: thing is not defined]
|
|
1632
|
+
*/
|
|
1633
|
+
if (!undefinedName) {
|
|
1634
|
+
throw error;
|
|
1635
|
+
}
|
|
1636
|
+
if (script.includes(undefinedName + '(')) {
|
|
1637
|
+
script = "const ".concat(undefinedName, " = ()=>'';") + script;
|
|
1638
|
+
}
|
|
1639
|
+
else {
|
|
1640
|
+
variables.add(undefinedName);
|
|
1641
|
+
script = "const ".concat(undefinedName, " = '';") + script;
|
|
1642
|
+
}
|
|
1643
|
+
}
|
|
1644
|
+
}
|
|
1645
|
+
catch (error) {
|
|
1646
|
+
if (!(error instanceof Error)) {
|
|
1647
|
+
throw error;
|
|
1648
|
+
}
|
|
1649
|
+
throw new ParsingError(spaceTrim$1(function (block) { return "\n Can not extract variables from the script\n\n ".concat(block(error.name), ": ").concat(block(error.message), "\n "); }));
|
|
1650
|
+
}
|
|
1651
|
+
return variables;
|
|
1652
|
+
}
|
|
1653
|
+
/**
|
|
1654
|
+
* TODO: [๐ฃ] Support for multiple languages - python, java,...
|
|
1655
|
+
*/
|
|
1656
|
+
|
|
1657
|
+
/**
|
|
1658
|
+
* Parses the prompt template and returns the set of all used parameters
|
|
1659
|
+
*
|
|
1660
|
+
* @param promptTemplate the template with used parameters
|
|
1661
|
+
* @returns the set of parameter names
|
|
1662
|
+
* @throws {ParsingError} if the script is invalid
|
|
1663
|
+
*/
|
|
1664
|
+
function extractParametersFromPromptTemplate(promptTemplate) {
|
|
1665
|
+
var e_1, _a, e_2, _b, e_3, _c;
|
|
1666
|
+
var title = promptTemplate.title, description = promptTemplate.description, blockType = promptTemplate.blockType, content = promptTemplate.content, jokerParameterNames = promptTemplate.jokerParameterNames;
|
|
1667
|
+
var parameterNames = new Set();
|
|
1668
|
+
try {
|
|
1669
|
+
for (var _d = __values(__spreadArray(__spreadArray(__spreadArray([], __read(extractParameters(title)), false), __read(extractParameters(description || '')), false), __read(extractParameters(content)), false)), _e = _d.next(); !_e.done; _e = _d.next()) {
|
|
1670
|
+
var parameterName = _e.value;
|
|
1671
|
+
parameterNames.add(parameterName);
|
|
1672
|
+
}
|
|
1673
|
+
}
|
|
1674
|
+
catch (e_1_1) { e_1 = { error: e_1_1 }; }
|
|
1675
|
+
finally {
|
|
1676
|
+
try {
|
|
1677
|
+
if (_e && !_e.done && (_a = _d.return)) _a.call(_d);
|
|
1678
|
+
}
|
|
1679
|
+
finally { if (e_1) throw e_1.error; }
|
|
1680
|
+
}
|
|
1681
|
+
if (blockType === 'SCRIPT') {
|
|
1682
|
+
try {
|
|
1683
|
+
for (var _f = __values(extractVariables(content)), _g = _f.next(); !_g.done; _g = _f.next()) {
|
|
1684
|
+
var parameterName = _g.value;
|
|
1685
|
+
parameterNames.add(parameterName);
|
|
1686
|
+
}
|
|
1687
|
+
}
|
|
1688
|
+
catch (e_2_1) { e_2 = { error: e_2_1 }; }
|
|
1689
|
+
finally {
|
|
1690
|
+
try {
|
|
1691
|
+
if (_g && !_g.done && (_b = _f.return)) _b.call(_f);
|
|
1692
|
+
}
|
|
1693
|
+
finally { if (e_2) throw e_2.error; }
|
|
1694
|
+
}
|
|
1695
|
+
}
|
|
1696
|
+
try {
|
|
1697
|
+
for (var _h = __values(jokerParameterNames || []), _j = _h.next(); !_j.done; _j = _h.next()) {
|
|
1698
|
+
var jokerName = _j.value;
|
|
1699
|
+
parameterNames.add(jokerName);
|
|
1700
|
+
}
|
|
1701
|
+
}
|
|
1702
|
+
catch (e_3_1) { e_3 = { error: e_3_1 }; }
|
|
1703
|
+
finally {
|
|
1704
|
+
try {
|
|
1705
|
+
if (_j && !_j.done && (_c = _h.return)) _c.call(_h);
|
|
1706
|
+
}
|
|
1707
|
+
finally { if (e_3) throw e_3.error; }
|
|
1708
|
+
}
|
|
1709
|
+
return parameterNames;
|
|
1710
|
+
}
|
|
1711
|
+
/**
|
|
1712
|
+
* TODO: [๐ฃ] If script require contentLanguage
|
|
1713
|
+
*/
|
|
1714
|
+
|
|
1715
|
+
/**
|
|
1716
|
+
* Creates a new set with all elements that are present in either set
|
|
1717
|
+
*
|
|
1718
|
+
* @deprecated use new javascript set methods instead @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set
|
|
1719
|
+
*/
|
|
1720
|
+
function union() {
|
|
1721
|
+
var e_1, _a, e_2, _b;
|
|
1722
|
+
var sets = [];
|
|
1723
|
+
for (var _i = 0; _i < arguments.length; _i++) {
|
|
1724
|
+
sets[_i] = arguments[_i];
|
|
1725
|
+
}
|
|
1726
|
+
var union = new Set();
|
|
1727
|
+
try {
|
|
1728
|
+
for (var sets_1 = __values(sets), sets_1_1 = sets_1.next(); !sets_1_1.done; sets_1_1 = sets_1.next()) {
|
|
1729
|
+
var set = sets_1_1.value;
|
|
1730
|
+
try {
|
|
1731
|
+
for (var _c = (e_2 = void 0, __values(Array.from(set))), _d = _c.next(); !_d.done; _d = _c.next()) {
|
|
1732
|
+
var item = _d.value;
|
|
1733
|
+
union.add(item);
|
|
1734
|
+
}
|
|
1735
|
+
}
|
|
1736
|
+
catch (e_2_1) { e_2 = { error: e_2_1 }; }
|
|
1737
|
+
finally {
|
|
1738
|
+
try {
|
|
1739
|
+
if (_d && !_d.done && (_b = _c.return)) _b.call(_c);
|
|
1740
|
+
}
|
|
1741
|
+
finally { if (e_2) throw e_2.error; }
|
|
1742
|
+
}
|
|
1743
|
+
}
|
|
1744
|
+
}
|
|
1745
|
+
catch (e_1_1) { e_1 = { error: e_1_1 }; }
|
|
1746
|
+
finally {
|
|
1747
|
+
try {
|
|
1748
|
+
if (sets_1_1 && !sets_1_1.done && (_a = sets_1.return)) _a.call(sets_1);
|
|
1749
|
+
}
|
|
1750
|
+
finally { if (e_1) throw e_1.error; }
|
|
1751
|
+
}
|
|
1752
|
+
return union;
|
|
1753
|
+
}
|
|
1754
|
+
|
|
1506
1755
|
/**
|
|
1507
1756
|
* This error occurs when some expectation is not met in the execution of the pipeline
|
|
1508
1757
|
*
|
|
@@ -1775,6 +2024,31 @@ function joinLlmExecutionTools() {
 return new (MultipleLlmExecutionTools.bind.apply(MultipleLlmExecutionTools, __spreadArray([void 0], __read(llmExecutionTools), false)))();
 }
 
+/**
+* Determine if the pipeline is fully prepared
+*/
+function isPipelinePrepared(pipeline) {
+// Note: Ignoring `pipeline.preparations` @@@
+// Note: Ignoring `pipeline.knowledgePieces` @@@
+if (!pipeline.personas.every(function (persona) { return persona.modelRequirements !== undefined; })) {
+console.log('!!!!', 'Not all personas have modelRequirements');
+return false;
+}
+if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
+console.log('!!!!', 'Not all knowledgeSources have preparationIds');
+return false;
+}
+// TODO: !!!!! Is context in each template
+// TODO: !!!!! Are samples prepared
+// TODO: !!!!! Are templates prepared
+return true;
+}
+/**
+* TODO: [๐ ] Maybe base this on `makeValidator`
+* TODO: [๐ผ] Export via core or utils
+* TODO: [๐ง] Pipeline can be partially prepared, this should return true ONLY if fully prepared
+*/
+
 /**
 * Takes an item or an array of items and returns an array of items
 *
@@ -1795,29 +2069,118 @@ function arrayableToArray(input) {
|
|
|
1795
2069
|
}
|
|
1796
2070
|
|
|
1797
2071
|
/**
|
|
1798
|
-
*
|
|
1799
|
-
|
|
1800
|
-
|
|
1801
|
-
|
|
1802
|
-
|
|
1803
|
-
|
|
1804
|
-
*
|
|
2072
|
+
* Just marks a place of place where should be something implemented
|
|
2073
|
+
* No side effects.
|
|
2074
|
+
*
|
|
2075
|
+
* Note: It can be usefull suppressing eslint errors of unused variables
|
|
2076
|
+
*
|
|
2077
|
+
* @param value any values
|
|
2078
|
+
* @returns void
|
|
2079
|
+
* @private within the repository
|
|
1805
2080
|
*/
|
|
1806
|
-
function
|
|
1807
|
-
|
|
1808
|
-
|
|
1809
|
-
|
|
1810
|
-
|
|
1811
|
-
text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
|
|
1812
|
-
text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
|
|
1813
|
-
return text.length;
|
|
2081
|
+
function TODO_USE() {
|
|
2082
|
+
var value = [];
|
|
2083
|
+
for (var _i = 0; _i < arguments.length; _i++) {
|
|
2084
|
+
value[_i] = arguments[_i];
|
|
2085
|
+
}
|
|
1814
2086
|
}
|
|
1815
2087
|
|
|
1816
2088
|
/**
|
|
1817
|
-
*
|
|
2089
|
+
* This error type indicates that some limit was reached
|
|
1818
2090
|
*/
|
|
1819
|
-
function
|
|
1820
|
-
|
|
2091
|
+
var LimitReachedError = /** @class */ (function (_super) {
|
|
2092
|
+
__extends(LimitReachedError, _super);
|
|
2093
|
+
function LimitReachedError(message) {
|
|
2094
|
+
var _this = _super.call(this, message) || this;
|
|
2095
|
+
_this.name = 'LimitReachedError';
|
|
2096
|
+
Object.setPrototypeOf(_this, LimitReachedError.prototype);
|
|
2097
|
+
return _this;
|
|
2098
|
+
}
|
|
2099
|
+
return LimitReachedError;
|
|
2100
|
+
}(Error));
|
|
2101
|
+
|
|
2102
|
+
/**
|
|
2103
|
+
* Replaces parameters in template with values from parameters object
|
|
2104
|
+
*
|
|
2105
|
+
* @param template the template with parameters in {curly} braces
|
|
2106
|
+
* @param parameters the object with parameters
|
|
2107
|
+
* @returns the template with replaced parameters
|
|
2108
|
+
* @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
|
|
2109
|
+
*/
|
|
2110
|
+
function replaceParameters(template, parameters) {
|
|
2111
|
+
var replacedTemplate = template;
|
|
2112
|
+
var match;
|
|
2113
|
+
var loopLimit = LOOP_LIMIT;
|
|
2114
|
+
var _loop_1 = function () {
|
|
2115
|
+
if (loopLimit-- < 0) {
|
|
2116
|
+
throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
|
|
2117
|
+
}
|
|
2118
|
+
var precol = match.groups.precol;
|
|
2119
|
+
var parameterName = match.groups.parameterName;
|
|
2120
|
+
if (parameterName === '') {
|
|
2121
|
+
return "continue";
|
|
2122
|
+
}
|
|
2123
|
+
if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
|
|
2124
|
+
throw new PipelineExecutionError('Parameter is already opened or not closed');
|
|
2125
|
+
}
|
|
2126
|
+
if (parameters[parameterName] === undefined) {
|
|
2127
|
+
throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
|
|
2128
|
+
}
|
|
2129
|
+
var parameterValue = parameters[parameterName];
|
|
2130
|
+
if (parameterValue === undefined) {
|
|
2131
|
+
throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
|
|
2132
|
+
}
|
|
2133
|
+
parameterValue = parameterValue.toString();
|
|
2134
|
+
if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
|
|
2135
|
+
parameterValue = parameterValue
|
|
2136
|
+
.split('\n')
|
|
2137
|
+
.map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
|
|
2138
|
+
.join('\n');
|
|
2139
|
+
}
|
|
2140
|
+
replacedTemplate =
|
|
2141
|
+
replacedTemplate.substring(0, match.index + precol.length) +
|
|
2142
|
+
parameterValue +
|
|
2143
|
+
replacedTemplate.substring(match.index + precol.length + parameterName.length + 2);
|
|
2144
|
+
};
|
|
2145
|
+
while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
|
|
2146
|
+
.exec(replacedTemplate))) {
|
|
2147
|
+
_loop_1();
|
|
2148
|
+
}
|
|
2149
|
+
// [๐ซ] Check if there are parameters that are not closed properly
|
|
2150
|
+
if (/{\w+$/.test(replacedTemplate)) {
|
|
2151
|
+
throw new PipelineExecutionError('Parameter is not closed');
|
|
2152
|
+
}
|
|
2153
|
+
// [๐ซ] Check if there are parameters that are not opened properly
|
|
2154
|
+
if (/^\w+}/.test(replacedTemplate)) {
|
|
2155
|
+
throw new PipelineExecutionError('Parameter is not opened');
|
|
2156
|
+
}
|
|
2157
|
+
return replacedTemplate;
|
|
2158
|
+
}
|
|
2159
|
+
|
|
2160
|
+
/**
|
|
2161
|
+
* The version of the Promptbook library
|
|
2162
|
+
*/
|
|
2163
|
+
var PROMPTBOOK_VERSION = '0.61.0-16';
|
|
2164
|
+
// TODO: !!!! List here all the versions and annotate + put into script
|
|
2165
|
+
|
|
2166
|
+
/**
|
|
2167
|
+
* Counts number of characters in the text
|
|
2168
|
+
*/
|
|
2169
|
+
function countCharacters(text) {
|
|
2170
|
+
// Remove null characters
|
|
2171
|
+
text = text.replace(/\0/g, '');
|
|
2172
|
+
// Replace emojis (and also ZWJ sequence) with hyphens
|
|
2173
|
+
text = text.replace(/(\p{Extended_Pictographic})\p{Modifier_Symbol}/gu, '$1');
|
|
2174
|
+
text = text.replace(/(\p{Extended_Pictographic})[\u{FE00}-\u{FE0F}]/gu, '$1');
|
|
2175
|
+
text = text.replace(/\p{Extended_Pictographic}(\u{200D}\p{Extended_Pictographic})*/gu, '-');
|
|
2176
|
+
return text.length;
|
|
2177
|
+
}
|
|
2178
|
+
|
|
2179
|
+
/**
|
|
2180
|
+
* Counts number of lines in the text
|
|
2181
|
+
*/
|
|
2182
|
+
function countLines(text) {
|
|
2183
|
+
if (text === '') {
|
|
1821
2184
|
return 0;
|
|
1822
2185
|
}
|
|
1823
2186
|
return text.split('\n').length;
|
|
@@ -1910,80 +2273,6 @@ function checkExpectations(expectations, value) {
|
|
|
1910
2273
|
* TODO: [๐] Unite object for expecting amount and format
|
|
1911
2274
|
*/
|
|
1912
2275
|
|
|
1913
|
-
/**
|
|
1914
|
-
* This error type indicates that some limit was reached
|
|
1915
|
-
*/
|
|
1916
|
-
var LimitReachedError = /** @class */ (function (_super) {
|
|
1917
|
-
__extends(LimitReachedError, _super);
|
|
1918
|
-
function LimitReachedError(message) {
|
|
1919
|
-
var _this = _super.call(this, message) || this;
|
|
1920
|
-
_this.name = 'LimitReachedError';
|
|
1921
|
-
Object.setPrototypeOf(_this, LimitReachedError.prototype);
|
|
1922
|
-
return _this;
|
|
1923
|
-
}
|
|
1924
|
-
return LimitReachedError;
|
|
1925
|
-
}(Error));
|
|
1926
|
-
|
|
1927
|
-
/**
|
|
1928
|
-
* Replaces parameters in template with values from parameters object
|
|
1929
|
-
*
|
|
1930
|
-
* @param template the template with parameters in {curly} braces
|
|
1931
|
-
* @param parameters the object with parameters
|
|
1932
|
-
* @returns the template with replaced parameters
|
|
1933
|
-
* @throws {PipelineExecutionError} if parameter is not defined, not closed, or not opened
|
|
1934
|
-
*
|
|
1935
|
-
* @private within the createPipelineExecutor
|
|
1936
|
-
*/
|
|
1937
|
-
function replaceParameters(template, parameters) {
|
|
1938
|
-
var replacedTemplate = template;
|
|
1939
|
-
var match;
|
|
1940
|
-
var loopLimit = LOOP_LIMIT;
|
|
1941
|
-
var _loop_1 = function () {
|
|
1942
|
-
if (loopLimit-- < 0) {
|
|
1943
|
-
throw new LimitReachedError('Loop limit reached during parameters replacement in `replaceParameters`');
|
|
1944
|
-
}
|
|
1945
|
-
var precol = match.groups.precol;
|
|
1946
|
-
var parameterName = match.groups.parameterName;
|
|
1947
|
-
if (parameterName === '') {
|
|
1948
|
-
return "continue";
|
|
1949
|
-
}
|
|
1950
|
-
if (parameterName.indexOf('{') !== -1 || parameterName.indexOf('}') !== -1) {
|
|
1951
|
-
throw new PipelineExecutionError('Parameter is already opened or not closed');
|
|
1952
|
-
}
|
|
1953
|
-
if (parameters[parameterName] === undefined) {
|
|
1954
|
-
throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
|
|
1955
|
-
}
|
|
1956
|
-
var parameterValue = parameters[parameterName];
|
|
1957
|
-
if (parameterValue === undefined) {
|
|
1958
|
-
throw new PipelineExecutionError("Parameter {".concat(parameterName, "} is not defined"));
|
|
1959
|
-
}
|
|
1960
|
-
parameterValue = parameterValue.toString();
|
|
1961
|
-
if (parameterValue.includes('\n') && /^\s*\W{0,3}\s*$/.test(precol)) {
|
|
1962
|
-
parameterValue = parameterValue
|
|
1963
|
-
.split('\n')
|
|
1964
|
-
.map(function (line, index) { return (index === 0 ? line : "".concat(precol).concat(line)); })
|
|
1965
|
-
.join('\n');
|
|
1966
|
-
}
|
|
1967
|
-
replacedTemplate =
|
|
1968
|
-
replacedTemplate.substring(0, match.index + precol.length) +
|
|
1969
|
-
parameterValue +
|
|
1970
|
-
replacedTemplate.substring(match.index + precol.length + parameterName.length + 2);
|
|
1971
|
-
};
|
|
1972
|
-
while ((match = /^(?<precol>.*){(?<parameterName>\w+)}(.*)/m /* <- Not global */
|
|
1973
|
-
.exec(replacedTemplate))) {
|
|
1974
|
-
_loop_1();
|
|
1975
|
-
}
|
|
1976
|
-
// [๐ซ] Check if there are parameters that are not closed properly
|
|
1977
|
-
if (/{\w+$/.test(replacedTemplate)) {
|
|
1978
|
-
throw new PipelineExecutionError('Parameter is not closed');
|
|
1979
|
-
}
|
|
1980
|
-
// [๐ซ] Check if there are parameters that are not opened properly
|
|
1981
|
-
if (/^\w+}/.test(replacedTemplate)) {
|
|
1982
|
-
throw new PipelineExecutionError('Parameter is not opened');
|
|
1983
|
-
}
|
|
1984
|
-
return replacedTemplate;
|
|
1985
|
-
}
|
|
1986
|
-
|
|
1987
2276
|
/**
|
|
1988
2277
|
* Creates executor function from pipeline and execution tools.
|
|
1989
2278
|
*
|
|
@@ -1992,18 +2281,73 @@ function replaceParameters(template, parameters) {
|
|
|
1992
2281
|
*/
|
|
1993
2282
|
function createPipelineExecutor(options) {
|
|
1994
2283
|
var _this = this;
|
|
1995
|
-
var
|
|
1996
|
-
var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ?
|
|
1997
|
-
validatePipeline(
|
|
2284
|
+
var rawPipeline = options.pipeline, tools = options.tools, _a = options.settings, settings = _a === void 0 ? {} : _a;
|
|
2285
|
+
var _b = settings.maxExecutionAttempts, maxExecutionAttempts = _b === void 0 ? MAX_EXECUTION_ATTEMPTS : _b, _c = settings.maxParallelCount, maxParallelCount = _c === void 0 ? MAX_PARALLEL_COUNT : _c, _d = settings.isVerbose, isVerbose = _d === void 0 ? false : _d;
|
|
2286
|
+
validatePipeline(rawPipeline);
|
|
1998
2287
|
var llmTools = joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(arrayableToArray(tools.llm)), false));
|
|
2288
|
+
var pipeline;
|
|
2289
|
+
if (isPipelinePrepared(rawPipeline)) {
|
|
2290
|
+
pipeline = rawPipeline;
|
|
2291
|
+
}
|
|
2292
|
+
else {
|
|
2293
|
+
console.warn(spaceTrim$1("\n Pipeline is not prepared\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n "));
|
|
2294
|
+
}
|
|
1999
2295
|
var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
|
|
2296
|
+
function getContextForTemplate(// <- TODO: [๐ง ][๐ฅ]
|
|
2297
|
+
template) {
|
|
2298
|
+
return __awaiter(this, void 0, void 0, function () {
|
|
2299
|
+
return __generator(this, function (_a) {
|
|
2300
|
+
// TODO: !!!!!! Implement Better - use real index and keyword search
|
|
2301
|
+
TODO_USE(template);
|
|
2302
|
+
return [2 /*return*/, pipeline.knowledgePieces.map(function (_a) {
|
|
2303
|
+
var content = _a.content;
|
|
2304
|
+
return "- ".concat(content);
|
|
2305
|
+
}).join('\n')];
|
|
2306
|
+
});
|
|
2307
|
+
});
|
|
2308
|
+
}
|
|
2309
|
+
function getReservedParametersForTemplate(template) {
|
|
2310
|
+
return __awaiter(this, void 0, void 0, function () {
|
|
2311
|
+
var context, currentDate, reservedParameters, RESERVED_PARAMETER_NAMES_1, RESERVED_PARAMETER_NAMES_1_1, parameterName;
|
|
2312
|
+
var e_3, _a;
|
|
2313
|
+
return __generator(this, function (_b) {
|
|
2314
|
+
switch (_b.label) {
|
|
2315
|
+
case 0: return [4 /*yield*/, getContextForTemplate(template)];
|
|
2316
|
+
case 1:
|
|
2317
|
+
context = _b.sent();
|
|
2318
|
+
currentDate = new Date().toISOString();
|
|
2319
|
+
reservedParameters = {
|
|
2320
|
+
context: context,
|
|
2321
|
+
currentDate: currentDate,
|
|
2322
|
+
};
|
|
2323
|
+
try {
|
|
2324
|
+
// Note: Doublecheck that ALL reserved parameters are defined:
|
|
2325
|
+
for (RESERVED_PARAMETER_NAMES_1 = __values(RESERVED_PARAMETER_NAMES), RESERVED_PARAMETER_NAMES_1_1 = RESERVED_PARAMETER_NAMES_1.next(); !RESERVED_PARAMETER_NAMES_1_1.done; RESERVED_PARAMETER_NAMES_1_1 = RESERVED_PARAMETER_NAMES_1.next()) {
|
|
2326
|
+
parameterName = RESERVED_PARAMETER_NAMES_1_1.value;
|
|
2327
|
+
if (reservedParameters[parameterName] === undefined) {
|
|
2328
|
+
throw new UnexpectedError("Reserved parameter {".concat(parameterName, "} is not defined"));
|
|
2329
|
+
}
|
|
2330
|
+
}
|
|
2331
|
+
}
|
|
2332
|
+
catch (e_3_1) { e_3 = { error: e_3_1 }; }
|
|
2333
|
+
finally {
|
|
2334
|
+
try {
|
|
2335
|
+
if (RESERVED_PARAMETER_NAMES_1_1 && !RESERVED_PARAMETER_NAMES_1_1.done && (_a = RESERVED_PARAMETER_NAMES_1.return)) _a.call(RESERVED_PARAMETER_NAMES_1);
|
|
2336
|
+
}
|
|
2337
|
+
finally { if (e_3) throw e_3.error; }
|
|
2338
|
+
}
|
|
2339
|
+
return [2 /*return*/, reservedParameters];
|
|
2340
|
+
}
|
|
2341
|
+
});
|
|
2342
|
+
});
|
|
2343
|
+
}
|
|
2000
2344
|
function executeSingleTemplate(currentTemplate) {
|
|
2001
2345
|
return __awaiter(this, void 0, void 0, function () {
|
|
2002
|
-
var name, title, priority, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts,
|
|
2003
|
-
var
|
|
2346
|
+
var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
|
|
2347
|
+
var e_7, _p, e_4, _q, e_6, _r, e_5, _s, _t;
|
|
2004
2348
|
var _this = this;
|
|
2005
|
-
return __generator(this, function (
|
|
2006
|
-
switch (
|
|
2349
|
+
return __generator(this, function (_u) {
|
|
2350
|
+
switch (_u.label) {
|
|
2007
2351
|
case 0:
|
|
2008
2352
|
name = "pipeline-executor-frame-".concat(currentTemplate.name);
|
|
2009
2353
|
title = currentTemplate.title;
|
|
@@ -2020,64 +2364,108 @@ function createPipelineExecutor(options) {
|
|
|
2020
2364
|
// <- [3]
|
|
2021
2365
|
})];
|
|
2022
2366
|
case 1:
|
|
2023
|
-
|
|
2024
|
-
|
|
2367
|
+
_u.sent();
|
|
2368
|
+
_u.label = 2;
|
|
2025
2369
|
case 2:
|
|
2370
|
+
usedParameterNames = extractParametersFromPromptTemplate(currentTemplate);
|
|
2371
|
+
dependentParameterNames = new Set(currentTemplate.dependentParameterNames);
|
|
2372
|
+
if (union(difference(usedParameterNames, dependentParameterNames), difference(dependentParameterNames, usedParameterNames)).size !== 0) {
|
|
2373
|
+
throw new UnexpectedError(spaceTrim$1("\n Dependent parameters are not consistent used parameters:\n\n Dependent parameters:\n ".concat(Array.from(dependentParameterNames).join(', '), "\n\n Used parameters:\n ").concat(Array.from(usedParameterNames).join(', '), "\n\n ")));
|
|
2374
|
+
}
|
|
2375
|
+
_b = (_a = Object).freeze;
|
|
2376
|
+
_c = [{}];
|
|
2377
|
+
return [4 /*yield*/, getReservedParametersForTemplate(currentTemplate)];
|
|
2378
|
+
case 3:
|
|
2379
|
+
definedParameters = _b.apply(_a, [__assign.apply(void 0, [__assign.apply(void 0, _c.concat([(_u.sent())])), parametersToPass])]);
|
|
2380
|
+
definedParameterNames = new Set(Object.keys(definedParameters));
|
|
2381
|
+
parameters = {};
|
|
2382
|
+
try {
|
|
2383
|
+
// Note: [2] Check that all used parameters are defined and removing unused parameters for this template
|
|
2384
|
+
for (_d = __values(Array.from(union(definedParameterNames, usedParameterNames, dependentParameterNames))), _e = _d.next(); !_e.done; _e = _d.next()) {
|
|
2385
|
+
parameterName = _e.value;
|
|
2386
|
+
// Situation: Parameter is defined and used
|
|
2387
|
+
if (definedParameterNames.has(parameterName) && usedParameterNames.has(parameterName)) {
|
|
2388
|
+
parameters[parameterName] = definedParameters[parameterName];
|
|
2389
|
+
}
|
|
2390
|
+
// Situation: Parameter is defined but NOT used
|
|
2391
|
+
else if (definedParameterNames.has(parameterName) && !usedParameterNames.has(parameterName)) {
|
|
2392
|
+
// Do not pass this parameter to prompt
|
|
2393
|
+
}
|
|
2394
|
+
// Situation: Parameter is NOT defined BUT used
|
|
2395
|
+
else if (!definedParameterNames.has(parameterName) && usedParameterNames.has(parameterName)) {
|
|
2396
|
+
// Houston, we have a problem
|
|
2397
|
+
// Note: Checking part is also done in `validatePipeline`, but it's good to doublecheck
|
|
2398
|
+
throw new UnexpectedError(spaceTrim$1("\n Parameter {".concat(parameterName, "} is NOT defined\n BUT used in template \"").concat(currentTemplate.title || currentTemplate.name, "\"\n\n This should be catched in `validatePipeline`\n\n ")));
|
|
2399
|
+
}
|
|
2400
|
+
}
|
|
2401
|
+
}
|
|
2402
|
+
catch (e_7_1) { e_7 = { error: e_7_1 }; }
|
|
2403
|
+
finally {
|
|
2404
|
+
try {
|
|
2405
|
+
if (_e && !_e.done && (_p = _d.return)) _p.call(_d);
|
|
2406
|
+
}
|
|
2407
|
+
finally { if (e_7) throw e_7.error; }
|
|
2408
|
+
}
|
|
2409
|
+
// Note: Now we can freeze `parameters` because we are sure that all and only used parameters are defined
|
|
2410
|
+
Object.freeze(parameters);
|
|
2026
2411
|
result = null;
|
|
2027
2412
|
resultString = null;
|
|
2028
2413
|
expectError = null;
|
|
2029
2414
|
maxAttempts = currentTemplate.blockType === 'PROMPT_DIALOG' ? Infinity : maxExecutionAttempts;
|
|
2030
|
-
|
|
2031
|
-
attempt = -
|
|
2032
|
-
|
|
2033
|
-
case
|
|
2034
|
-
if (!(attempt < maxAttempts)) return [3 /*break*/,
|
|
2415
|
+
jokerParameterNames = currentTemplate.jokerParameterNames || [];
|
|
2416
|
+
attempt = -jokerParameterNames.length;
|
|
2417
|
+
_u.label = 4;
|
|
2418
|
+
case 4:
|
|
2419
|
+
if (!(attempt < maxAttempts)) return [3 /*break*/, 52];
|
|
2035
2420
|
isJokerAttempt = attempt < 0;
|
|
2036
|
-
|
|
2037
|
-
if (isJokerAttempt && !
|
|
2421
|
+
jokerParameterName = jokerParameterNames[jokerParameterNames.length + attempt];
|
|
2422
|
+
if (isJokerAttempt && !jokerParameterName) {
|
|
2038
2423
|
throw new UnexpectedError("Joker not found in attempt ".concat(attempt));
|
|
2039
2424
|
}
|
|
2040
2425
|
result = null;
|
|
2041
2426
|
resultString = null;
|
|
2042
2427
|
expectError = null;
|
|
2043
2428
|
if (isJokerAttempt) {
|
|
2044
|
-
if (
|
|
2045
|
-
throw new PipelineExecutionError("Joker parameter {".concat(
|
|
2429
|
+
if (parameters[jokerParameterName] === undefined) {
|
|
2430
|
+
throw new PipelineExecutionError("Joker parameter {".concat(jokerParameterName, "} not defined"));
|
|
2431
|
+
// <- TODO: This is maybe `PipelineLogicError` which should be detected in `validatePipeline` and here just thrown as `UnexpectedError`
|
|
2432
|
+
}
|
|
2433
|
+
else {
|
|
2434
|
+
resultString = parameters[jokerParameterName];
|
|
2046
2435
|
}
|
|
2047
|
-
resultString = parametersToPass[joker];
|
|
2048
|
-
}
|
|
2049
|
-
_o.label = 4;
|
|
2050
|
-
case 4:
|
|
2051
|
-
_o.trys.push([4, 47, 48, 49]);
|
|
2052
|
-
if (!!isJokerAttempt) return [3 /*break*/, 29];
|
|
2053
|
-
_a = currentTemplate.blockType;
|
|
2054
|
-
switch (_a) {
|
|
2055
|
-
case 'SIMPLE_TEMPLATE': return [3 /*break*/, 5];
|
|
2056
|
-
case 'PROMPT_TEMPLATE': return [3 /*break*/, 6];
|
|
2057
|
-
case 'SCRIPT': return [3 /*break*/, 15];
|
|
2058
|
-
case 'PROMPT_DIALOG': return [3 /*break*/, 26];
|
|
2059
2436
|
}
|
|
2060
|
-
|
|
2437
|
+
_u.label = 5;
|
|
2061
2438
|
case 5:
|
|
2062
|
-
|
|
2439
|
+
_u.trys.push([5, 48, 49, 50]);
|
|
2440
|
+
if (!!isJokerAttempt) return [3 /*break*/, 30];
|
|
2441
|
+
_f = currentTemplate.blockType;
|
|
2442
|
+
switch (_f) {
|
|
2443
|
+
case 'SIMPLE_TEMPLATE': return [3 /*break*/, 6];
|
|
2444
|
+
case 'PROMPT_TEMPLATE': return [3 /*break*/, 7];
|
|
2445
|
+
case 'SCRIPT': return [3 /*break*/, 16];
|
|
2446
|
+
case 'PROMPT_DIALOG': return [3 /*break*/, 27];
|
|
2447
|
+
}
|
|
2063
2448
|
return [3 /*break*/, 29];
|
|
2064
2449
|
case 6:
|
|
2450
|
+
resultString = replaceParameters(currentTemplate.content, parameters);
|
|
2451
|
+
return [3 /*break*/, 30];
|
|
2452
|
+
case 7:
|
|
2065
2453
|
prompt = {
|
|
2066
2454
|
title: currentTemplate.title,
|
|
2067
2455
|
pipelineUrl: "".concat(pipeline.pipelineUrl
|
|
2068
2456
|
? pipeline.pipelineUrl
|
|
2069
2457
|
: 'anonymous' /* <- TODO: [๐ง ] How to deal with anonymous pipelines, do here some auto-url like SHA-256 based ad-hoc identifier? */, "#").concat(currentTemplate.name),
|
|
2070
|
-
parameters:
|
|
2071
|
-
content:
|
|
2072
|
-
// <- TODO: !!!!! Apply {context} and knowledges
|
|
2073
|
-
// <- TODO: !!!!! Apply samples
|
|
2458
|
+
parameters: parameters,
|
|
2459
|
+
content: currentTemplate.content,
|
|
2074
2460
|
modelRequirements: currentTemplate.modelRequirements,
|
|
2075
|
-
|
|
2076
|
-
|
|
2461
|
+
expectations: __assign(__assign({}, (pipeline.personas.find(function (_a) {
|
|
2462
|
+
var name = _a.name;
|
|
2463
|
+
return name === currentTemplate.personaName;
|
|
2464
|
+
}) || {})), currentTemplate.expectations),
|
|
2077
2465
|
expectFormat: currentTemplate.expectFormat,
|
|
2078
|
-
postprocessing: (currentTemplate.
|
|
2079
|
-
var errors, _a, _b, scriptTools, error_5,
|
|
2080
|
-
var
|
|
2466
|
+
postprocessing: (currentTemplate.postprocessingFunctionNames || []).map(function (functionName) { return function (result) { return __awaiter(_this, void 0, void 0, function () {
|
|
2467
|
+
var errors, _a, _b, scriptTools, error_5, e_8_1;
|
|
2468
|
+
var e_8, _c;
|
|
2081
2469
|
return __generator(this, function (_d) {
|
|
2082
2470
|
switch (_d.label) {
|
|
2083
2471
|
case 0:
|
|
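New lines 2461-2464 above build the prompt's `expectations` by spreading the persona whose `name` matches `currentTemplate.personaName` and then the template's own expectations over it. A hedged TypeScript sketch of that precedence (the object shapes are simplified guesses, not the package's typings):

```ts
// Sketch: template-level expectations override whatever the matching persona provides.
function buildExpectations(
    personas: ReadonlyArray<{ name: string } & Record<string, unknown>>,
    personaName: string | null,
    templateExpectations: Record<string, unknown> | undefined,
): Record<string, unknown> {
    const persona = personas.find(({ name }) => name === personaName) || {};
    // Later spread wins, so the template's explicit expectations take precedence over persona defaults
    return { ...persona, ...templateExpectations };
}
```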
@@ -2098,7 +2486,7 @@ function createPipelineExecutor(options) {
|
|
|
2098
2486
|
script: "".concat(functionName, "(result)"),
|
|
2099
2487
|
parameters: {
|
|
2100
2488
|
result: result || '',
|
|
2101
|
-
// Note: No ...
|
|
2489
|
+
// Note: No ...parametersForTemplate, because working with result only
|
|
2102
2490
|
},
|
|
2103
2491
|
})];
|
|
2104
2492
|
case 4: return [2 /*return*/, _d.sent()];
|
|
@@ -2107,6 +2495,9 @@ function createPipelineExecutor(options) {
|
|
|
2107
2495
|
if (!(error_5 instanceof Error)) {
|
|
2108
2496
|
throw error_5;
|
|
2109
2497
|
}
|
|
2498
|
+
if (error_5 instanceof UnexpectedError) {
|
|
2499
|
+
throw error_5;
|
|
2500
|
+
}
|
|
2110
2501
|
errors.push(error_5);
|
|
2111
2502
|
return [3 /*break*/, 6];
|
|
2112
2503
|
case 6:
|
|
@@ -2114,14 +2505,14 @@ function createPipelineExecutor(options) {
|
|
|
2114
2505
|
return [3 /*break*/, 2];
|
|
2115
2506
|
case 7: return [3 /*break*/, 10];
|
|
2116
2507
|
case 8:
|
|
2117
|
-
|
|
2118
|
-
|
|
2508
|
+
e_8_1 = _d.sent();
|
|
2509
|
+
e_8 = { error: e_8_1 };
|
|
2119
2510
|
return [3 /*break*/, 10];
|
|
2120
2511
|
case 9:
|
|
2121
2512
|
try {
|
|
2122
2513
|
if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
|
|
2123
2514
|
}
|
|
2124
|
-
finally { if (
|
|
2515
|
+
finally { if (e_8) throw e_8.error; }
|
|
2125
2516
|
return [7 /*endfinally*/];
|
|
2126
2517
|
case 10:
|
|
2127
2518
|
if (errors.length === 0) {
|
|
@@ -2137,35 +2528,35 @@ function createPipelineExecutor(options) {
|
|
|
2137
2528
|
});
|
|
2138
2529
|
}); }; }),
|
|
2139
2530
|
};
|
|
2140
|
-
|
|
2141
|
-
switch (
|
|
2142
|
-
case 'CHAT': return [3 /*break*/,
|
|
2143
|
-
case 'COMPLETION': return [3 /*break*/,
|
|
2144
|
-
case 'EMBEDDING': return [3 /*break*/,
|
|
2531
|
+
_g = currentTemplate.modelRequirements.modelVariant;
|
|
2532
|
+
switch (_g) {
|
|
2533
|
+
case 'CHAT': return [3 /*break*/, 8];
|
|
2534
|
+
case 'COMPLETION': return [3 /*break*/, 10];
|
|
2535
|
+
case 'EMBEDDING': return [3 /*break*/, 12];
|
|
2145
2536
|
}
|
|
2146
|
-
return [3 /*break*/,
|
|
2147
|
-
case
|
|
2148
|
-
case
|
|
2149
|
-
chatResult =
|
|
2537
|
+
return [3 /*break*/, 14];
|
|
2538
|
+
case 8: return [4 /*yield*/, llmTools.callChatModel(deepFreeze(prompt))];
|
|
2539
|
+
case 9:
|
|
2540
|
+
chatResult = _u.sent();
|
|
2150
2541
|
// TODO: [๐ฌ] Destroy chatThread
|
|
2151
2542
|
result = chatResult;
|
|
2152
2543
|
resultString = chatResult.content;
|
|
2153
|
-
return [3 /*break*/,
|
|
2154
|
-
case
|
|
2155
|
-
case
|
|
2156
|
-
completionResult =
|
|
2544
|
+
return [3 /*break*/, 15];
|
|
2545
|
+
case 10: return [4 /*yield*/, llmTools.callCompletionModel(deepFreeze(prompt))];
|
|
2546
|
+
case 11:
|
|
2547
|
+
completionResult = _u.sent();
|
|
2157
2548
|
result = completionResult;
|
|
2158
2549
|
resultString = completionResult.content;
|
|
2159
|
-
return [3 /*break*/,
|
|
2160
|
-
case
|
|
2161
|
-
case
|
|
2162
|
-
embeddingResult =
|
|
2550
|
+
return [3 /*break*/, 15];
|
|
2551
|
+
case 12: return [4 /*yield*/, llmTools.callEmbeddingModel(deepFreeze(prompt))];
|
|
2552
|
+
case 13:
|
|
2553
|
+
embeddingResult = _u.sent();
|
|
2163
2554
|
result = embeddingResult;
|
|
2164
2555
|
resultString = embeddingResult.content.join(',');
|
|
2165
|
-
return [3 /*break*/,
|
|
2166
|
-
case
|
|
2167
|
-
case
|
|
2168
|
-
case
|
|
2556
|
+
return [3 /*break*/, 15];
|
|
2557
|
+
case 14: throw new PipelineExecutionError("Unknown model variant \"".concat(currentTemplate.modelRequirements.modelVariant, "\""));
|
|
2558
|
+
case 15: return [3 /*break*/, 30];
|
|
2559
|
+
case 16:
|
|
2169
2560
|
if (arrayableToArray(tools.script).length === 0) {
|
|
2170
2561
|
throw new PipelineExecutionError('No script execution tools are available');
|
|
2171
2562
|
}
|
|
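The renumbered cases above dispatch on `currentTemplate.modelRequirements.modelVariant` and call the matching LLM tool with a deep-frozen prompt. A reduced sketch of that dispatch, assuming an abbreviated tools interface (method names are taken from the diff, everything else is illustrative):

```ts
// Sketch: one frozen prompt, three possible model variants.
type ModelVariant = 'CHAT' | 'COMPLETION' | 'EMBEDDING';

interface LlmTools {
    callChatModel(prompt: object): Promise<{ content: string }>;
    callCompletionModel(prompt: object): Promise<{ content: string }>;
    callEmbeddingModel(prompt: object): Promise<{ content: number[] }>;
}

async function callByVariant(llmTools: LlmTools, variant: ModelVariant, prompt: object): Promise<string> {
    const frozen = Object.freeze(prompt); // the bundle uses a recursive deepFreeze helper here

    switch (variant) {
        case 'CHAT':
            return (await llmTools.callChatModel(frozen)).content;
        case 'COMPLETION':
            return (await llmTools.callCompletionModel(frozen)).content;
        case 'EMBEDDING':
            // Embedding results are joined into a single string, mirroring the diff
            return (await llmTools.callEmbeddingModel(frozen)).content.join(',');
        default:
            throw new Error(`Unknown model variant "${variant as string}"`);
    }
}
```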
@@ -2174,49 +2565,52 @@ function createPipelineExecutor(options) {
|
|
|
2174
2565
|
}
|
|
2175
2566
|
// TODO: DRY [1]
|
|
2176
2567
|
scriptPipelineExecutionErrors = [];
|
|
2177
|
-
|
|
2178
|
-
case 16:
|
|
2179
|
-
_o.trys.push([16, 23, 24, 25]);
|
|
2180
|
-
_c = (e_2 = void 0, __values(arrayableToArray(tools.script))), _d = _c.next();
|
|
2181
|
-
_o.label = 17;
|
|
2568
|
+
_u.label = 17;
|
|
2182
2569
|
case 17:
|
|
2183
|
-
|
|
2184
|
-
|
|
2185
|
-
|
|
2570
|
+
_u.trys.push([17, 24, 25, 26]);
|
|
2571
|
+
_h = (e_4 = void 0, __values(arrayableToArray(tools.script))), _j = _h.next();
|
|
2572
|
+
_u.label = 18;
|
|
2186
2573
|
case 18:
|
|
2187
|
-
|
|
2188
|
-
|
|
2574
|
+
if (!!_j.done) return [3 /*break*/, 23];
|
|
2575
|
+
scriptTools = _j.value;
|
|
2576
|
+
_u.label = 19;
|
|
2577
|
+
case 19:
|
|
2578
|
+
_u.trys.push([19, 21, , 22]);
|
|
2579
|
+
return [4 /*yield*/, scriptTools.execute(deepFreeze({
|
|
2189
2580
|
scriptLanguage: currentTemplate.contentLanguage,
|
|
2190
2581
|
script: currentTemplate.content,
|
|
2191
|
-
parameters:
|
|
2192
|
-
})];
|
|
2193
|
-
case 19:
|
|
2194
|
-
resultString = _o.sent();
|
|
2195
|
-
return [3 /*break*/, 22];
|
|
2582
|
+
parameters: parameters,
|
|
2583
|
+
}))];
|
|
2196
2584
|
case 20:
|
|
2197
|
-
|
|
2585
|
+
resultString = _u.sent();
|
|
2586
|
+
return [3 /*break*/, 23];
|
|
2587
|
+
case 21:
|
|
2588
|
+
error_2 = _u.sent();
|
|
2198
2589
|
if (!(error_2 instanceof Error)) {
|
|
2199
2590
|
throw error_2;
|
|
2200
2591
|
}
|
|
2592
|
+
if (error_2 instanceof UnexpectedError) {
|
|
2593
|
+
throw error_2;
|
|
2594
|
+
}
|
|
2201
2595
|
scriptPipelineExecutionErrors.push(error_2);
|
|
2202
|
-
return [3 /*break*/,
|
|
2203
|
-
case
|
|
2204
|
-
|
|
2205
|
-
return [3 /*break*/,
|
|
2206
|
-
case
|
|
2207
|
-
case 23:
|
|
2208
|
-
e_2_1 = _o.sent();
|
|
2209
|
-
e_2 = { error: e_2_1 };
|
|
2210
|
-
return [3 /*break*/, 25];
|
|
2596
|
+
return [3 /*break*/, 22];
|
|
2597
|
+
case 22:
|
|
2598
|
+
_j = _h.next();
|
|
2599
|
+
return [3 /*break*/, 18];
|
|
2600
|
+
case 23: return [3 /*break*/, 26];
|
|
2211
2601
|
case 24:
|
|
2602
|
+
e_4_1 = _u.sent();
|
|
2603
|
+
e_4 = { error: e_4_1 };
|
|
2604
|
+
return [3 /*break*/, 26];
|
|
2605
|
+
case 25:
|
|
2212
2606
|
try {
|
|
2213
|
-
if (
|
|
2607
|
+
if (_j && !_j.done && (_q = _h.return)) _q.call(_h);
|
|
2214
2608
|
}
|
|
2215
|
-
finally { if (
|
|
2609
|
+
finally { if (e_4) throw e_4.error; }
|
|
2216
2610
|
return [7 /*endfinally*/];
|
|
2217
|
-
case
|
|
2611
|
+
case 26:
|
|
2218
2612
|
if (resultString !== null) {
|
|
2219
|
-
return [3 /*break*/,
|
|
2613
|
+
return [3 /*break*/, 30];
|
|
2220
2614
|
}
|
|
2221
2615
|
if (scriptPipelineExecutionErrors.length === 1) {
|
|
2222
2616
|
throw scriptPipelineExecutionErrors[0];
|
|
@@ -2226,101 +2620,104 @@ function createPipelineExecutor(options) {
|
|
|
2226
2620
|
.map(function (error) { return '- ' + error.message; })
|
|
2227
2621
|
.join('\n\n')), "\n "); }));
|
|
2228
2622
|
}
|
|
2229
|
-
case
|
|
2623
|
+
case 27:
|
|
2230
2624
|
if (tools.userInterface === undefined) {
|
|
2231
2625
|
throw new PipelineExecutionError('User interface tools are not available');
|
|
2232
2626
|
}
|
|
2233
|
-
return [4 /*yield*/, tools.userInterface.promptDialog({
|
|
2627
|
+
return [4 /*yield*/, tools.userInterface.promptDialog(deepFreeze({
|
|
2234
2628
|
promptTitle: currentTemplate.title,
|
|
2235
|
-
promptMessage: replaceParameters(currentTemplate.description || '',
|
|
2236
|
-
defaultValue: replaceParameters(currentTemplate.content,
|
|
2629
|
+
promptMessage: replaceParameters(currentTemplate.description || '', parameters),
|
|
2630
|
+
defaultValue: replaceParameters(currentTemplate.content, parameters),
|
|
2237
2631
|
// TODO: [๐ง ] !! Figure out how to define placeholder in .ptbk.md file
|
|
2238
2632
|
placeholder: undefined,
|
|
2239
2633
|
priority: priority,
|
|
2240
|
-
})];
|
|
2241
|
-
case
|
|
2634
|
+
}))];
|
|
2635
|
+
case 28:
|
|
2242
2636
|
// TODO: [๐น] When making next attempt for `PROMPT DIALOG`, preserve the previous user input
|
|
2243
|
-
resultString =
|
|
2244
|
-
return [3 /*break*/,
|
|
2245
|
-
case
|
|
2246
|
-
case 29:
|
|
2247
|
-
if (!(!isJokerAttempt && currentTemplate.postprocessing)) return [3 /*break*/, 46];
|
|
2248
|
-
_o.label = 30;
|
|
2637
|
+
resultString = _u.sent();
|
|
2638
|
+
return [3 /*break*/, 30];
|
|
2639
|
+
case 29: throw new PipelineExecutionError("Unknown execution type \"".concat(currentTemplate.blockType, "\""));
|
|
2249
2640
|
case 30:
|
|
2250
|
-
|
|
2251
|
-
|
|
2252
|
-
_o.label = 31;
|
|
2641
|
+
if (!(!isJokerAttempt && currentTemplate.postprocessingFunctionNames)) return [3 /*break*/, 47];
|
|
2642
|
+
_u.label = 31;
|
|
2253
2643
|
case 31:
|
|
2254
|
-
|
|
2255
|
-
|
|
2644
|
+
_u.trys.push([31, 45, 46, 47]);
|
|
2645
|
+
_k = (e_6 = void 0, __values(currentTemplate.postprocessingFunctionNames)), _l = _k.next();
|
|
2646
|
+
_u.label = 32;
|
|
2647
|
+
case 32:
|
|
2648
|
+
if (!!_l.done) return [3 /*break*/, 44];
|
|
2649
|
+
functionName = _l.value;
|
|
2256
2650
|
// TODO: DRY [1]
|
|
2257
2651
|
scriptPipelineExecutionErrors = [];
|
|
2258
2652
|
postprocessingError = null;
|
|
2259
|
-
|
|
2260
|
-
case 32:
|
|
2261
|
-
_o.trys.push([32, 39, 40, 41]);
|
|
2262
|
-
_g = (e_3 = void 0, __values(arrayableToArray(tools.script))), _h = _g.next();
|
|
2263
|
-
_o.label = 33;
|
|
2653
|
+
_u.label = 33;
|
|
2264
2654
|
case 33:
|
|
2265
|
-
|
|
2266
|
-
|
|
2267
|
-
|
|
2655
|
+
_u.trys.push([33, 40, 41, 42]);
|
|
2656
|
+
_m = (e_5 = void 0, __values(arrayableToArray(tools.script))), _o = _m.next();
|
|
2657
|
+
_u.label = 34;
|
|
2268
2658
|
case 34:
|
|
2269
|
-
_o.
|
|
2659
|
+
if (!!_o.done) return [3 /*break*/, 39];
|
|
2660
|
+
scriptTools = _o.value;
|
|
2661
|
+
_u.label = 35;
|
|
2662
|
+
case 35:
|
|
2663
|
+
_u.trys.push([35, 37, , 38]);
|
|
2270
2664
|
return [4 /*yield*/, scriptTools.execute({
|
|
2271
2665
|
scriptLanguage: "javascript" /* <- TODO: Try it in each languages; In future allow postprocessing with arbitrary combination of languages to combine */,
|
|
2272
2666
|
script: "".concat(functionName, "(resultString)"),
|
|
2273
2667
|
parameters: {
|
|
2274
2668
|
resultString: resultString || '',
|
|
2275
|
-
// Note: No ...
|
|
2669
|
+
// Note: No ...parametersForTemplate, because working with result only
|
|
2276
2670
|
},
|
|
2277
2671
|
})];
|
|
2278
|
-
case 35:
|
|
2279
|
-
resultString = _o.sent();
|
|
2280
|
-
postprocessingError = null;
|
|
2281
|
-
return [3 /*break*/, 38];
|
|
2282
2672
|
case 36:
|
|
2283
|
-
|
|
2673
|
+
resultString = _u.sent();
|
|
2674
|
+
postprocessingError = null;
|
|
2675
|
+
return [3 /*break*/, 39];
|
|
2676
|
+
case 37:
|
|
2677
|
+
error_3 = _u.sent();
|
|
2284
2678
|
if (!(error_3 instanceof Error)) {
|
|
2285
2679
|
throw error_3;
|
|
2286
2680
|
}
|
|
2681
|
+
if (error_3 instanceof UnexpectedError) {
|
|
2682
|
+
throw error_3;
|
|
2683
|
+
}
|
|
2287
2684
|
postprocessingError = error_3;
|
|
2288
2685
|
scriptPipelineExecutionErrors.push(error_3);
|
|
2289
|
-
return [3 /*break*/,
|
|
2290
|
-
case
|
|
2291
|
-
|
|
2292
|
-
return [3 /*break*/,
|
|
2293
|
-
case
|
|
2294
|
-
case 39:
|
|
2295
|
-
e_3_1 = _o.sent();
|
|
2296
|
-
e_3 = { error: e_3_1 };
|
|
2297
|
-
return [3 /*break*/, 41];
|
|
2686
|
+
return [3 /*break*/, 38];
|
|
2687
|
+
case 38:
|
|
2688
|
+
_o = _m.next();
|
|
2689
|
+
return [3 /*break*/, 34];
|
|
2690
|
+
case 39: return [3 /*break*/, 42];
|
|
2298
2691
|
case 40:
|
|
2692
|
+
e_5_1 = _u.sent();
|
|
2693
|
+
e_5 = { error: e_5_1 };
|
|
2694
|
+
return [3 /*break*/, 42];
|
|
2695
|
+
case 41:
|
|
2299
2696
|
try {
|
|
2300
|
-
if (
|
|
2697
|
+
if (_o && !_o.done && (_s = _m.return)) _s.call(_m);
|
|
2301
2698
|
}
|
|
2302
|
-
finally { if (
|
|
2699
|
+
finally { if (e_5) throw e_5.error; }
|
|
2303
2700
|
return [7 /*endfinally*/];
|
|
2304
|
-
case
|
|
2701
|
+
case 42:
|
|
2305
2702
|
if (postprocessingError) {
|
|
2306
2703
|
throw postprocessingError;
|
|
2307
2704
|
}
|
|
2308
|
-
|
|
2309
|
-
case
|
|
2310
|
-
|
|
2311
|
-
return [3 /*break*/,
|
|
2312
|
-
case
|
|
2313
|
-
case 44:
|
|
2314
|
-
e_4_1 = _o.sent();
|
|
2315
|
-
e_4 = { error: e_4_1 };
|
|
2316
|
-
return [3 /*break*/, 46];
|
|
2705
|
+
_u.label = 43;
|
|
2706
|
+
case 43:
|
|
2707
|
+
_l = _k.next();
|
|
2708
|
+
return [3 /*break*/, 32];
|
|
2709
|
+
case 44: return [3 /*break*/, 47];
|
|
2317
2710
|
case 45:
|
|
2711
|
+
e_6_1 = _u.sent();
|
|
2712
|
+
e_6 = { error: e_6_1 };
|
|
2713
|
+
return [3 /*break*/, 47];
|
|
2714
|
+
case 46:
|
|
2318
2715
|
try {
|
|
2319
|
-
if (
|
|
2716
|
+
if (_l && !_l.done && (_r = _k.return)) _r.call(_k);
|
|
2320
2717
|
}
|
|
2321
|
-
finally { if (
|
|
2718
|
+
finally { if (e_6) throw e_6.error; }
|
|
2322
2719
|
return [7 /*endfinally*/];
|
|
2323
|
-
case
|
|
2720
|
+
case 47:
|
|
2324
2721
|
// TODO: [๐] Unite object for expecting amount and format
|
|
2325
2722
|
if (currentTemplate.expectFormat) {
|
|
2326
2723
|
if (currentTemplate.expectFormat === 'JSON') {
|
|
@@ -2333,15 +2730,18 @@ function createPipelineExecutor(options) {
|
|
|
2333
2730
|
if (currentTemplate.expectations) {
|
|
2334
2731
|
checkExpectations(currentTemplate.expectations, resultString || '');
|
|
2335
2732
|
}
|
|
2336
|
-
return [3 /*break*/,
|
|
2337
|
-
case
|
|
2338
|
-
error_4 =
|
|
2733
|
+
return [3 /*break*/, 52];
|
|
2734
|
+
case 48:
|
|
2735
|
+
error_4 = _u.sent();
|
|
2339
2736
|
if (!(error_4 instanceof ExpectError)) {
|
|
2340
2737
|
throw error_4;
|
|
2341
2738
|
}
|
|
2739
|
+
if (error_4 instanceof UnexpectedError) {
|
|
2740
|
+
throw error_4;
|
|
2741
|
+
}
|
|
2342
2742
|
expectError = error_4;
|
|
2343
|
-
return [3 /*break*/,
|
|
2344
|
-
case
|
|
2743
|
+
return [3 /*break*/, 50];
|
|
2744
|
+
case 49:
|
|
2345
2745
|
if (!isJokerAttempt &&
|
|
2346
2746
|
currentTemplate.blockType === 'PROMPT_TEMPLATE' &&
|
|
2347
2747
|
prompt
|
|
@@ -2363,15 +2763,15 @@ function createPipelineExecutor(options) {
|
|
|
2363
2763
|
});
|
|
2364
2764
|
}
|
|
2365
2765
|
return [7 /*endfinally*/];
|
|
2366
|
-
case
|
|
2766
|
+
case 50:
|
|
2367
2767
|
if (expectError !== null && attempt === maxAttempts - 1) {
|
|
2368
2768
|
throw new PipelineExecutionError(spaceTrim$1(function (block) { return "\n LLM execution failed ".concat(maxExecutionAttempts, "x\n\n ---\n Last error ").concat((expectError === null || expectError === void 0 ? void 0 : expectError.name) || '', ":\n ").concat(block((expectError === null || expectError === void 0 ? void 0 : expectError.message) || ''), "\n\n Last result:\n ").concat(resultString, "\n ---\n "); }));
|
|
2369
2769
|
}
|
|
2370
|
-
|
|
2371
|
-
case 50:
|
|
2372
|
-
attempt++;
|
|
2373
|
-
return [3 /*break*/, 3];
|
|
2770
|
+
_u.label = 51;
|
|
2374
2771
|
case 51:
|
|
2772
|
+
attempt++;
|
|
2773
|
+
return [3 /*break*/, 4];
|
|
2774
|
+
case 52:
|
|
2375
2775
|
if (resultString === null) {
|
|
2376
2776
|
throw new UnexpectedError('Something went wrong and prompt result is null');
|
|
2377
2777
|
}
|
|
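The hunks above thread `expectError` through a retry loop: a failed `checkExpectations` call is remembered, the template is retried, and only when the last allowed attempt still fails does a `PipelineExecutionError` surface. A compact sketch of that control flow, assuming the expectation checker throws `ExpectError` on violation as in the diff:

```ts
// Sketch of the retry-until-expectations-pass loop around a single template.
class ExpectError extends Error {}
class PipelineExecutionError extends Error {}

async function executeWithRetries(
    produceResult: () => Promise<string>,
    checkExpectations: (result: string) => void, // throws ExpectError when not met
    maxAttempts: number,
): Promise<string> {
    let expectError: ExpectError | null = null;
    let resultString: string | null = null;

    for (let attempt = 0; attempt < maxAttempts; attempt++) {
        expectError = null;
        try {
            resultString = await produceResult();
            checkExpectations(resultString);
        } catch (error) {
            if (!(error instanceof ExpectError)) {
                throw error; // unexpected failures are never retried
            }
            expectError = error;
        }

        if (expectError === null && resultString !== null) {
            return resultString;
        }
        if (expectError !== null && attempt === maxAttempts - 1) {
            throw new PipelineExecutionError(
                `LLM execution failed ${maxAttempts}x\nLast error: ${expectError.message}\nLast result: ${resultString ?? ''}`,
            );
        }
    }
    throw new PipelineExecutionError('Something went wrong and prompt result is null');
}
```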
@@ -2387,18 +2787,27 @@ function createPipelineExecutor(options) {
|
|
|
2387
2787
|
// <- [3]
|
|
2388
2788
|
});
|
|
2389
2789
|
}
|
|
2390
|
-
parametersToPass = __assign(__assign({}, parametersToPass), (
|
|
2790
|
+
parametersToPass = Object.freeze(__assign(__assign({}, parametersToPass), (_t = {}, _t[currentTemplate.resultingParameterName] = resultString /* <- Note: Not need to detect parameter collision here because pipeline checks logic consistency during construction */, _t)));
|
|
2391
2791
|
return [2 /*return*/];
|
|
2392
2792
|
}
|
|
2393
2793
|
});
|
|
2394
2794
|
});
|
|
2395
2795
|
}
|
|
2396
|
-
var
|
|
2397
|
-
var e_1,
|
|
2398
|
-
return __generator(this, function (
|
|
2399
|
-
switch (
|
|
2796
|
+
var executionReport, _a, _b, parameter, parametersToPass, resovedParameters_1, unresovedTemplates, resolving_1, loopLimit, _loop_1, error_1, usage_1, outputParameters, errors, _c, _d, parameter, usage;
|
|
2797
|
+
var e_1, _e, e_2, _f;
|
|
2798
|
+
return __generator(this, function (_g) {
|
|
2799
|
+
switch (_g.label) {
|
|
2400
2800
|
case 0:
|
|
2401
|
-
|
|
2801
|
+
if (!(pipeline === undefined)) return [3 /*break*/, 2];
|
|
2802
|
+
return [4 /*yield*/, preparePipeline(rawPipeline, {
|
|
2803
|
+
llmTools: llmTools,
|
|
2804
|
+
isVerbose: isVerbose,
|
|
2805
|
+
maxParallelCount: maxParallelCount,
|
|
2806
|
+
})];
|
|
2807
|
+
case 1:
|
|
2808
|
+
pipeline = _g.sent();
|
|
2809
|
+
_g.label = 2;
|
|
2810
|
+
case 2:
|
|
2402
2811
|
executionReport = {
|
|
2403
2812
|
pipelineUrl: pipeline.pipelineUrl,
|
|
2404
2813
|
title: pipeline.title,
|
|
@@ -2407,9 +2816,35 @@ function createPipelineExecutor(options) {
|
|
|
2407
2816
|
description: pipeline.description,
|
|
2408
2817
|
promptExecutions: [],
|
|
2409
2818
|
};
|
|
2410
|
-
|
|
2411
|
-
|
|
2412
|
-
|
|
2819
|
+
try {
|
|
2820
|
+
// Note: Check that all input input parameters are defined
|
|
2821
|
+
for (_a = __values(pipeline.parameters), _b = _a.next(); !_b.done; _b = _a.next()) {
|
|
2822
|
+
parameter = _b.value;
|
|
2823
|
+
if (parameter.isInput && inputParameters[parameter.name] === undefined) {
|
|
2824
|
+
return [2 /*return*/, deepFreezeWithSameType({
|
|
2825
|
+
isSuccessful: false,
|
|
2826
|
+
errors: [
|
|
2827
|
+
new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter")),
|
|
2828
|
+
// <- TODO: !!!!! Test this error
|
|
2829
|
+
],
|
|
2830
|
+
executionReport: executionReport,
|
|
2831
|
+
outputParameters: {},
|
|
2832
|
+
usage: ZERO_USAGE,
|
|
2833
|
+
})];
|
|
2834
|
+
}
|
|
2835
|
+
}
|
|
2836
|
+
}
|
|
2837
|
+
catch (e_1_1) { e_1 = { error: e_1_1 }; }
|
|
2838
|
+
finally {
|
|
2839
|
+
try {
|
|
2840
|
+
if (_b && !_b.done && (_e = _a.return)) _e.call(_a);
|
|
2841
|
+
}
|
|
2842
|
+
finally { if (e_1) throw e_1.error; }
|
|
2843
|
+
}
|
|
2844
|
+
parametersToPass = inputParameters;
|
|
2845
|
+
_g.label = 3;
|
|
2846
|
+
case 3:
|
|
2847
|
+
_g.trys.push([3, 8, , 9]);
|
|
2413
2848
|
resovedParameters_1 = pipeline.parameters
|
|
2414
2849
|
.filter(function (_a) {
|
|
2415
2850
|
var isInput = _a.isInput;
|
|
@@ -2424,8 +2859,8 @@ function createPipelineExecutor(options) {
|
|
|
2424
2859
|
loopLimit = LOOP_LIMIT;
|
|
2425
2860
|
_loop_1 = function () {
|
|
2426
2861
|
var currentTemplate, work_1;
|
|
2427
|
-
return __generator(this, function (
|
|
2428
|
-
switch (
|
|
2862
|
+
return __generator(this, function (_h) {
|
|
2863
|
+
switch (_h.label) {
|
|
2429
2864
|
case 0:
|
|
2430
2865
|
if (loopLimit-- < 0) {
|
|
2431
2866
|
// Note: Really UnexpectedError not LimitReachedError - this should be catched during validatePipeline
|
|
@@ -2440,7 +2875,7 @@ function createPipelineExecutor(options) {
|
|
|
2440
2875
|
if (!!currentTemplate) return [3 /*break*/, 3];
|
|
2441
2876
|
/* [5] */ return [4 /*yield*/, Promise.race(resolving_1)];
|
|
2442
2877
|
case 2:
|
|
2443
|
-
/* [5] */
|
|
2878
|
+
/* [5] */ _h.sent();
|
|
2444
2879
|
return [3 /*break*/, 4];
|
|
2445
2880
|
case 3:
|
|
2446
2881
|
unresovedTemplates = unresovedTemplates.filter(function (template) { return template !== currentTemplate; });
|
|
@@ -2452,24 +2887,24 @@ function createPipelineExecutor(options) {
|
|
|
2452
2887
|
resolving_1 = resolving_1.filter(function (w) { return w !== work_1; });
|
|
2453
2888
|
});
|
|
2454
2889
|
resolving_1.push(work_1);
|
|
2455
|
-
|
|
2890
|
+
_h.label = 4;
|
|
2456
2891
|
case 4: return [2 /*return*/];
|
|
2457
2892
|
}
|
|
2458
2893
|
});
|
|
2459
2894
|
};
|
|
2460
|
-
|
|
2461
|
-
case
|
|
2462
|
-
if (!(unresovedTemplates.length > 0)) return [3 /*break*/,
|
|
2895
|
+
_g.label = 4;
|
|
2896
|
+
case 4:
|
|
2897
|
+
if (!(unresovedTemplates.length > 0)) return [3 /*break*/, 6];
|
|
2463
2898
|
return [5 /*yield**/, _loop_1()];
|
|
2464
|
-
case 3:
|
|
2465
|
-
_d.sent();
|
|
2466
|
-
return [3 /*break*/, 2];
|
|
2467
|
-
case 4: return [4 /*yield*/, Promise.all(resolving_1)];
|
|
2468
2899
|
case 5:
|
|
2469
|
-
|
|
2470
|
-
return [3 /*break*/,
|
|
2471
|
-
case 6:
|
|
2472
|
-
|
|
2900
|
+
_g.sent();
|
|
2901
|
+
return [3 /*break*/, 4];
|
|
2902
|
+
case 6: return [4 /*yield*/, Promise.all(resolving_1)];
|
|
2903
|
+
case 7:
|
|
2904
|
+
_g.sent();
|
|
2905
|
+
return [3 /*break*/, 9];
|
|
2906
|
+
case 8:
|
|
2907
|
+
error_1 = _g.sent();
|
|
2473
2908
|
if (!(error_1 instanceof Error)) {
|
|
2474
2909
|
throw error_1;
|
|
2475
2910
|
}
|
|
@@ -2477,49 +2912,56 @@ function createPipelineExecutor(options) {
|
|
|
2477
2912
|
var result = _a.result;
|
|
2478
2913
|
return (result === null || result === void 0 ? void 0 : result.usage) || ZERO_USAGE;
|
|
2479
2914
|
})), false));
|
|
2480
|
-
return [2 /*return*/, {
|
|
2915
|
+
return [2 /*return*/, deepFreezeWithSameType({
|
|
2481
2916
|
isSuccessful: false,
|
|
2482
2917
|
errors: [error_1],
|
|
2483
2918
|
usage: usage_1,
|
|
2484
2919
|
executionReport: executionReport,
|
|
2485
2920
|
outputParameters: parametersToPass,
|
|
2486
|
-
}];
|
|
2487
|
-
case
|
|
2921
|
+
})];
|
|
2922
|
+
case 9:
|
|
2923
|
+
outputParameters = {};
|
|
2924
|
+
errors = [];
|
|
2488
2925
|
try {
|
|
2489
2926
|
// Note: Filter ONLY output parameters
|
|
2490
|
-
for (
|
|
2491
|
-
|
|
2492
|
-
|
|
2927
|
+
for (_c = __values(pipeline.parameters.filter(function (_a) {
|
|
2928
|
+
var isOutput = _a.isOutput;
|
|
2929
|
+
return isOutput;
|
|
2930
|
+
})), _d = _c.next(); !_d.done; _d = _c.next()) {
|
|
2931
|
+
parameter = _d.value;
|
|
2932
|
+
if (parametersToPass[parameter.name] === undefined) {
|
|
2933
|
+
errors.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an output parameter but not set in the pipeline")));
|
|
2493
2934
|
continue;
|
|
2494
2935
|
}
|
|
2495
|
-
|
|
2936
|
+
outputParameters[parameter.name] = parametersToPass[parameter.name] || '';
|
|
2496
2937
|
}
|
|
2497
2938
|
}
|
|
2498
|
-
catch (
|
|
2939
|
+
catch (e_2_1) { e_2 = { error: e_2_1 }; }
|
|
2499
2940
|
finally {
|
|
2500
2941
|
try {
|
|
2501
|
-
if (
|
|
2942
|
+
if (_d && !_d.done && (_f = _c.return)) _f.call(_c);
|
|
2502
2943
|
}
|
|
2503
|
-
finally { if (
|
|
2944
|
+
finally { if (e_2) throw e_2.error; }
|
|
2504
2945
|
}
|
|
2505
2946
|
usage = addUsage.apply(void 0, __spreadArray([], __read(executionReport.promptExecutions.map(function (_a) {
|
|
2506
2947
|
var result = _a.result;
|
|
2507
2948
|
return (result === null || result === void 0 ? void 0 : result.usage) || ZERO_USAGE;
|
|
2508
2949
|
})), false));
|
|
2509
|
-
return [2 /*return*/, {
|
|
2950
|
+
return [2 /*return*/, deepFreezeWithSameType({
|
|
2510
2951
|
isSuccessful: true,
|
|
2511
|
-
errors:
|
|
2952
|
+
errors: errors,
|
|
2512
2953
|
usage: usage,
|
|
2513
2954
|
executionReport: executionReport,
|
|
2514
|
-
outputParameters:
|
|
2515
|
-
}];
|
|
2955
|
+
outputParameters: outputParameters,
|
|
2956
|
+
})];
|
|
2516
2957
|
}
|
|
2517
2958
|
});
|
|
2518
2959
|
}); };
|
|
2519
2960
|
return pipelineExecutor;
|
|
2520
2961
|
}
|
|
2521
2962
|
/**
|
|
2522      - * TODO:
     2963 + * TODO: Use isVerbose here (not only pass to `preparePipeline`)
     2964 + * TODO: [๐ช] Use maxParallelCount here (not only pass to `preparePipeline`)
2523 2965   * TODO: [โ] Probbably move expectations from templates to parameters
2524 2966   * TODO: [๐ง ] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
2525 2967   * TODO: [๐ง] Strongly type the executors to avoid need of remove nullables whtn noUncheckedIndexedAccess in tsconfig.json
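Both return paths in the hunks above now pass the executor result through `deepFreezeWithSameType(...)` instead of returning a plain object literal. A hedged sketch of what such a helper typically does; the package's real implementation lives in its utils and may differ:

```ts
// Sketch: recursively freeze an object graph while keeping its static type.
function deepFreezeWithSameType<T>(value: T): T {
    if (value !== null && typeof value === 'object') {
        for (const key of Object.keys(value as Record<string, unknown>)) {
            deepFreezeWithSameType((value as Record<string, unknown>)[key]);
        }
        Object.freeze(value);
    }
    return value;
}

// Usage mirroring the diff: the returned executor result can no longer be mutated by callers.
const result = deepFreezeWithSameType({ isSuccessful: true, errors: [], outputParameters: { answer: '42' } });
// result.outputParameters.answer = 'changed'; // would throw in strict mode
```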
@@ -2528,23 +2970,6 @@ function createPipelineExecutor(options) {
|
|
|
2528
2970
|
* TODO: [๐ ] Actions, instruments (and maybe knowledge) => Functions and tools
|
|
2529
2971
|
*/
|
|
2530
2972
|
|
|
2531      - /**
2532      - * Just marks a place of place where should be something implemented
2533      - * No side effects.
2534      - *
2535      - * Note: It can be usefull suppressing eslint errors of unused variables
2536      - *
2537      - * @param value any values
2538      - * @returns void
2539      - * @private within the repository
2540      - */
2541      - function TODO_USE() {
2542      - var value = [];
2543      - for (var _i = 0; _i < arguments.length; _i++) {
2544      - value[_i] = arguments[_i];
2545      - }
2546      - }
2547      -
2548
2973
|
/**
|
|
2549
2974
|
* @@@
|
|
2550
2975
|
*/
|
|
@@ -2672,7 +3097,7 @@ function prepareKnowledgeFromMarkdown(content /* <- TODO: [๐] (?maybe not) Al
|
|
|
2672
3097
|
});
|
|
2673
3098
|
}
|
|
2674
3099
|
/**
|
|
2675
|
-
* TODO: [๐ผ] !!! Export via `@promptbook/markdown`
|
|
3100
|
+
* TODO: [๐][๐ผ] !!! Export via `@promptbook/markdown`
|
|
2676
3101
|
* TODO: [๐ช] Do it in parallel 11:11
|
|
2677
3102
|
* Note: No need to aggregate usage here, it is done by intercepting the llmTools
|
|
2678
3103
|
*/
|
|
@@ -2695,7 +3120,7 @@ function prepareKnowledgePieces(knowledgeSources, options) {
|
|
|
2695
3120
|
var partialPieces, pieces;
|
|
2696
3121
|
return __generator(this, function (_a) {
|
|
2697
3122
|
switch (_a.label) {
|
|
2698
|
-
case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.source, // <- TODO: [๐]
|
|
3123
|
+
case 0: return [4 /*yield*/, prepareKnowledgeFromMarkdown(knowledgeSource.source, // <- TODO: [๐] !!! Unhardcode markdown, detect which type it is
|
|
2699
3124
|
options)];
|
|
2700
3125
|
case 1:
|
|
2701
3126
|
partialPieces = _a.sent();
|
|
@@ -2733,7 +3158,7 @@ TODO: [๐ง] This is how it can look in future
|
|
|
2733
3158
|
> ):
|
|
2734
3159
|
*/
|
|
2735
3160
|
/**
|
|
2736
|
-
* TODO: [๐ผ] !!! Export via `@promptbook/core`
|
|
3161
|
+
* TODO: [๐][๐ผ] !!! Export via `@promptbook/core`
|
|
2737
3162
|
* TODO: [๐ง] In future one preparation can take data from previous preparation and save tokens and time
|
|
2738
3163
|
* Put `knowledgePieces` into `PrepareKnowledgeOptions`
|
|
2739
3164
|
* TODO: [๐ช] More than max things can run in parallel by acident [1,[2a,2b,_],[3a,3b,_]]
|
|
@@ -2859,12 +3284,15 @@ function preparePipeline(pipeline, options) {
|
|
|
2859
3284
|
partialknowledgePiecesPrepared = _b.sent();
|
|
2860
3285
|
knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [๐ง] -> */ currentPreparation.id] })); });
|
|
2861
3286
|
// ----- /Knowledge preparation -----
|
|
3287
|
+
// TODO: !!!!! Add context to each template (if missing)
|
|
3288
|
+
// TODO: !!!!! Apply samples to each template (if missing)
|
|
2862
3289
|
return [2 /*return*/, __assign(__assign({}, pipeline), { knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
|
|
2863
3290
|
}
|
|
2864
3291
|
});
|
|
2865
3292
|
});
|
|
2866
3293
|
}
|
|
2867
3294
|
/**
|
|
3295
|
+
* TODO: !!!!! Index the samples and maybe templates
|
|
2868
3296
|
* TODO: [๐ผ] !!! Export via `@promptbook/core`
|
|
2869
3297
|
* TODO: Write tests for `preparePipeline`
|
|
2870
3298
|
* TODO: [๐] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
|
|
@@ -2920,7 +3348,7 @@ var knowledgeCommandParser = {
|
|
|
2920 3348   /**
2921 3349   * Link to discussion
2922 3350   */
2923      -
     3351 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/41',
2924 3352   /**
2925 3353   * Example usages of the KNOWLEDGE command
2926 3354   */
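The parser hunks below fill the previously empty `Link to discussion` slot with a concrete `documentationUrl`. A minimal sketch of the parser shape this implies; the `description` and `examples` values here are placeholders, only the URL comes from the diff, and the real `CommandParser` typing has more members:

```ts
// Sketch: the subset of a command parser definition touched by this diff.
interface CommandParserSketch {
    name: string;
    aliasNames?: string[];
    description: string;
    /** Link to discussion */
    documentationUrl: string;
    /** Example usages of the command */
    examples: string[];
}

const knowledgeCommandParserSketch: CommandParserSketch = {
    name: 'KNOWLEDGE',
    description: 'Adds a knowledge source to the pipeline', // placeholder wording
    documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/41',
    examples: ['KNOWLEDGE https://example.com/about-us'], // placeholder example
};
```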
@@ -2957,8 +3385,9 @@ var knowledgeCommandParser = {
|
|
|
2957 3385   /**
2958 3386   * Note: Prototype of [๐ง] (remove this comment after full implementation)
2959 3387   */
2960      - applyToPipelineJson: function (
     3388 + applyToPipelineJson: function (personaCommand, subjects) {
2961 3389   var source = personaCommand.source;
     3390 + var pipelineJson = subjects.pipelineJson;
2962 3391   var name = titleToName(source);
2963 3392   pipelineJson.knowledgeSources.push({
2964 3393   name: name,
@@ -2993,7 +3422,7 @@ var personaCommandParser = {
|
|
|
2993 3422   /**
2994 3423   * Link to discussion
2995 3424   */
2996      -
     3425 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/22',
2997 3426   /**
2998 3427   * Example usages of the PERSONA command
2999 3428   */
@@ -3021,8 +3450,15 @@ var personaCommandParser = {
|
|
|
3021 3450   /**
3022 3451   * Note: Prototype of [๐ง] (remove this comment after full implementation)
3023 3452   */
3024      - applyToPipelineJson: function (
     3453 + applyToPipelineJson: function (personaCommand, subjects) {
3025 3454   var personaName = personaCommand.personaName, personaDescription = personaCommand.personaDescription;
     3455 + var pipelineJson = subjects.pipelineJson, templateJson = subjects.templateJson;
     3456 + if (templateJson !== null) {
     3457 + if (templateJson.blockType !== 'PROMPT_TEMPLATE') {
     3458 + throw new ParsingError("PERSONA command can be used only in PROMPT_TEMPLATE block");
     3459 + }
     3460 + templateJson.personaName = personaName;
     3461 + }
3026 3462   var persona = pipelineJson.personas.find(function (persona) { return persona.name === personaName; });
3027 3463   if (persona === undefined) {
3028 3464   pipelineJson.personas.push({
@@ -3181,7 +3617,7 @@ var blockCommandParser = {
|
|
|
3181 3617   /**
3182 3618   * Link to discussion
3183 3619   */
3184      -
     3620 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/64',
3185 3621   /**
3186 3622   * Example usages of the BLOCK command
3187 3623   */
@@ -3209,6 +3645,7 @@ var blockCommandParser = {
|
|
|
3209 3645   //---
3210 3646   /* <- TODO: [๐ง ] Maybe dynamic */
3211 3647   ],
     3648 + // TODO: [โ๏ธ] order: -10 /* <- Note: Putting before other commands */
3212 3649   /**
3213 3650   * Parses the BLOCK command
3214 3651   */
@@ -3325,7 +3762,7 @@ var expectCommandParser = {
|
|
|
3325 3762   /**
3326 3763   * Link to discussion
3327 3764   */
3328      -
     3765 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/30',
3329 3766   /**
3330 3767   * Example usages of the EXPECT command
3331 3768   */
@@ -3443,7 +3880,7 @@ var jokerCommandParser = {
|
|
|
3443 3880   /**
3444 3881   * Link to discussion
3445 3882   */
3446      -
     3883 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/66',
3447 3884   /**
3448 3885   * Example usages of the JOKER command
3449 3886   */
@@ -3486,7 +3923,11 @@ var modelCommandParser = {
|
|
|
3486 3923   /**
3487 3924   * BOILERPLATE command can be used in:
3488 3925   */
3489      - usagePlaces: [
     3926 + usagePlaces: [
     3927 + 'PIPELINE_HEAD',
     3928 + // <- TODO: [๐ง ][โ] Should there be possibility to set MODEL for entire pipeline?
     3929 + 'PIPELINE_TEMPLATE',
     3930 + ],
3490 3931   /**
3491 3932   * Description of the MODEL command
3492 3933   */
@@ -3494,7 +3935,7 @@ var modelCommandParser = {
|
|
|
3494 3935   /**
3495 3936   * Link to discussion
3496 3937   */
3497      -
     3938 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/67',
3498 3939   /**
3499 3940   * Example usages of the MODEL command
3500 3941   */
@@ -3574,7 +4015,7 @@ var parameterCommandParser = {
|
|
|
3574 4015   /**
3575 4016   * Link to discussion
3576 4017   */
3577      -
     4018 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/68',
3578 4019   /**
3579 4020   * Example usages of the PARAMETER command
3580 4021   */
@@ -3638,7 +4079,7 @@ var postprocessCommandParser = {
|
|
|
3638 4079   /**
3639 4080   * Link to discussion
3640 4081   */
3641      -
     4082 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/31',
3642 4083   /**
3643 4084   * Example usages of the POSTPROCESS command
3644 4085   */
@@ -3693,7 +4134,7 @@ var promptbookVersionCommandParser = {
|
|
|
3693 4134   /**
3694 4135   * Link to discussion
3695 4136   */
3696      -
     4137 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/69',
3697 4138   /**
3698 4139   * Example usages of the PROMPTBOOK_VERSION command
3699 4140   */
@@ -3746,7 +4187,7 @@ var urlCommandParser = {
|
|
|
3746 4187   /**
3747 4188   * Link to discussion
3748 4189   */
3749      -
     4190 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/70',
3750 4191   /**
3751 4192   * Example usages of the URL command
3752 4193   */
@@ -3816,7 +4257,7 @@ var actionCommandParser = {
|
|
|
3816 4257   /**
3817 4258   * Link to discussion
3818 4259   */
3819      -
     4260 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/72',
3820 4261   /**
3821 4262   * Example usages of the ACTION command
3822 4263   */
@@ -3855,7 +4296,7 @@ var instrumentCommandParser = {
|
|
|
3855 4296   /**
3856 4297   * Link to discussion
3857 4298   */
3858      -
     4299 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/71',
3859 4300   /**
3860 4301   * Example usages of the INSTRUMENT command
3861 4302   */
@@ -3898,7 +4339,7 @@ var boilerplateCommandParser = {
|
|
|
3898 4339   /**
3899 4340   * Link to discussion
3900 4341   */
3901      -
     4342 + documentationUrl: 'https://github.com/webgptorg/promptbook/discussions/@@',
3902 4343   /**
3903 4344   * Example usages of the BOILERPLATE command
3904 4345   */
@@ -4032,9 +4473,9 @@ function parseCommand(raw, usagePlace) {
|
|
|
4032 4473   */
4033 4474   function getSupportedCommandsMessage() {
4034 4475   return COMMANDS.flatMap(function (_a) {
4035      - var name = _a.name, aliasNames = _a.aliasNames, description = _a.description,
     4476 + var name = _a.name, aliasNames = _a.aliasNames, description = _a.description, documentationUrl = _a.documentationUrl;
4036 4477   return __spreadArray([
4037      - "- **".concat(name, "** ").concat(description, ", see [discussion](").concat(
     4478 + "- **".concat(name, "** ").concat(description, ", see [discussion](").concat(documentationUrl, ")")
4038 4479   ], __read((aliasNames || []).map(function (aliasName) { return " - **".concat(aliasName, "** Alias for **").concat(name, "**"); })), false);
4039 4480   }).join('\n');
4040 4481   }
@@ -4389,195 +4830,6 @@ function removeContentComments(content) {
|
|
|
4389
4830
|
return spaceTrim$1(content.replace(/<!--(.*?)-->/gs, ''));
|
|
4390
4831
|
}
|
|
4391
4832
|
|
|
4392
|
-
/**
|
|
4393
|
-
* Create difference set of two sets.
|
|
4394
|
-
*
|
|
4395
|
-
* @deprecated use new javascript set methods instead @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set
|
|
4396
|
-
*/
|
|
4397
|
-
function difference(a, b, isEqual) {
|
|
4398
|
-
var e_1, _a;
|
|
4399
|
-
if (isEqual === void 0) { isEqual = function (a, b) { return a === b; }; }
|
|
4400
|
-
var diff = new Set();
|
|
4401
|
-
var _loop_1 = function (itemA) {
|
|
4402
|
-
if (!Array.from(b).some(function (itemB) { return isEqual(itemA, itemB); })) {
|
|
4403
|
-
diff.add(itemA);
|
|
4404
|
-
}
|
|
4405
|
-
};
|
|
4406
|
-
try {
|
|
4407
|
-
for (var _b = __values(Array.from(a)), _c = _b.next(); !_c.done; _c = _b.next()) {
|
|
4408
|
-
var itemA = _c.value;
|
|
4409
|
-
_loop_1(itemA);
|
|
4410
|
-
}
|
|
4411
|
-
}
|
|
4412
|
-
catch (e_1_1) { e_1 = { error: e_1_1 }; }
|
|
4413
|
-
finally {
|
|
4414
|
-
try {
|
|
4415
|
-
if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
|
|
4416
|
-
}
|
|
4417
|
-
finally { if (e_1) throw e_1.error; }
|
|
4418
|
-
}
|
|
4419
|
-
return diff;
|
|
4420
|
-
}
|
|
4421
|
-
|
|
4422
|
-
/**
|
|
4423
|
-
* Creates a new set with all elements that are present in either set
|
|
4424
|
-
*
|
|
4425
|
-
* @deprecated use new javascript set methods instead @see https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Set
|
|
4426
|
-
*/
|
|
4427
|
-
function union() {
|
|
4428
|
-
var e_1, _a, e_2, _b;
|
|
4429
|
-
var sets = [];
|
|
4430
|
-
for (var _i = 0; _i < arguments.length; _i++) {
|
|
4431
|
-
sets[_i] = arguments[_i];
|
|
4432
|
-
}
|
|
4433
|
-
var union = new Set();
|
|
4434
|
-
try {
|
|
4435
|
-
for (var sets_1 = __values(sets), sets_1_1 = sets_1.next(); !sets_1_1.done; sets_1_1 = sets_1.next()) {
|
|
4436
|
-
var set = sets_1_1.value;
|
|
4437
|
-
try {
|
|
4438
|
-
for (var _c = (e_2 = void 0, __values(Array.from(set))), _d = _c.next(); !_d.done; _d = _c.next()) {
|
|
4439
|
-
var item = _d.value;
|
|
4440
|
-
union.add(item);
|
|
4441
|
-
}
|
|
4442
|
-
}
|
|
4443
|
-
catch (e_2_1) { e_2 = { error: e_2_1 }; }
|
|
4444
|
-
finally {
|
|
4445
|
-
try {
|
|
4446
|
-
if (_d && !_d.done && (_b = _c.return)) _b.call(_c);
|
|
4447
|
-
}
|
|
4448
|
-
finally { if (e_2) throw e_2.error; }
|
|
4449
|
-
}
|
|
4450
|
-
}
|
|
4451
|
-
}
|
|
4452
|
-
catch (e_1_1) { e_1 = { error: e_1_1 }; }
|
|
4453
|
-
finally {
|
|
4454
|
-
try {
|
|
4455
|
-
if (sets_1_1 && !sets_1_1.done && (_a = sets_1.return)) _a.call(sets_1);
|
|
4456
|
-
}
|
|
4457
|
-
finally { if (e_1) throw e_1.error; }
|
|
4458
|
-
}
|
|
4459
|
-
return union;
|
|
4460
|
-
}
|
|
4461
|
-
|
|
4462
|
-
/**
|
|
4463
|
-
* Parses the template and returns the list of all parameter names
|
|
4464
|
-
*
|
|
4465
|
-
* @param template the template with parameters in {curly} braces
|
|
4466
|
-
* @returns the list of parameter names
|
|
4467
|
-
*/
|
|
4468
|
-
function extractParameters(template) {
|
|
4469
|
-
var e_1, _a;
|
|
4470
|
-
var matches = template.matchAll(/{\w+}/g);
|
|
4471
|
-
var parameterNames = new Set();
|
|
4472
|
-
try {
|
|
4473
|
-
for (var matches_1 = __values(matches), matches_1_1 = matches_1.next(); !matches_1_1.done; matches_1_1 = matches_1.next()) {
|
|
4474
|
-
var match = matches_1_1.value;
|
|
4475
|
-
var parameterName = match[0].slice(1, -1);
|
|
4476
|
-
parameterNames.add(parameterName);
|
|
4477
|
-
}
|
|
4478
|
-
}
|
|
4479
|
-
catch (e_1_1) { e_1 = { error: e_1_1 }; }
|
|
4480
|
-
finally {
|
|
4481
|
-
try {
|
|
4482
|
-
if (matches_1_1 && !matches_1_1.done && (_a = matches_1.return)) _a.call(matches_1);
|
|
4483
|
-
}
|
|
4484
|
-
finally { if (e_1) throw e_1.error; }
|
|
4485
|
-
}
|
|
4486
|
-
return parameterNames;
|
|
4487
|
-
}
|
|
4488
|
-
|
|
4489
|
-
/**
|
|
4490
|
-
* Parses the given script and returns the list of all used variables that are not defined in the script
|
|
4491
|
-
*
|
|
4492
|
-
* @param script from which to extract the variables
|
|
4493
|
-
* @returns the list of variable names
|
|
4494
|
-
* @throws {ParsingError} if the script is invalid
|
|
4495
|
-
*/
|
|
4496
|
-
function extractVariables(script) {
|
|
4497
|
-
var variables = new Set();
|
|
4498
|
-
script = "(()=>{".concat(script, "})()");
|
|
4499
|
-
try {
|
|
4500
|
-
for (var i = 0; i < 100 /* <- TODO: This limit to configuration */; i++)
|
|
4501
|
-
try {
|
|
4502
|
-
eval(script);
|
|
4503
|
-
}
|
|
4504
|
-
catch (error) {
|
|
4505
|
-
if (!(error instanceof ReferenceError)) {
|
|
4506
|
-
throw error;
|
|
4507
|
-
}
|
|
4508
|
-
var undefinedName = error.message.split(' ')[0];
|
|
4509
|
-
/*
|
|
4510
|
-
Note: Parsing the error
|
|
4511
|
-
[ReferenceError: thing is not defined]
|
|
4512
|
-
*/
|
|
4513
|
-
if (!undefinedName) {
|
|
4514
|
-
throw error;
|
|
4515
|
-
}
|
|
4516
|
-
if (script.includes(undefinedName + '(')) {
|
|
4517
|
-
script = "const ".concat(undefinedName, " = ()=>'';") + script;
|
|
4518
|
-
}
|
|
4519
|
-
else {
|
|
4520
|
-
variables.add(undefinedName);
|
|
4521
|
-
script = "const ".concat(undefinedName, " = '';") + script;
|
|
4522
|
-
}
|
|
4523
|
-
}
|
|
4524
|
-
}
|
|
4525
|
-
catch (error) {
|
|
4526
|
-
if (!(error instanceof Error)) {
|
|
4527
|
-
throw error;
|
|
4528
|
-
}
|
|
4529
|
-
throw new ParsingError(spaceTrim$1(function (block) { return "\n Can not extract variables from the script\n\n ".concat(block(error.name), ": ").concat(block(error.message), "\n "); }));
|
|
4530
|
-
}
|
|
4531
|
-
return variables;
|
|
4532
|
-
}
|
|
4533
|
-
/**
|
|
4534
|
-
* TODO: [๐ฃ] Support for multiple languages - python, java,...
|
|
4535
|
-
*/
|
|
4536
|
-
|
|
4537
|
-
/**
|
|
4538
|
-
* Parses the prompt template and returns the set of all used parameters
|
|
4539
|
-
*
|
|
4540
|
-
* @param promptTemplate the template with used parameters
|
|
4541
|
-
* @returns the set of parameter names
|
|
4542
|
-
* @throws {ParsingError} if the script is invalid
|
|
4543
|
-
*/
|
|
4544
|
-
function extractParametersFromPromptTemplate(promptTemplate) {
|
|
4545
|
-
var e_1, _a, e_2, _b;
|
|
4546
|
-
var parameterNames = new Set();
|
|
4547
|
-
try {
|
|
4548
|
-
for (var _c = __values(__spreadArray(__spreadArray(__spreadArray([], __read(extractParameters(promptTemplate.title)), false), __read(extractParameters(promptTemplate.description || '')), false), __read(extractParameters(promptTemplate.content)), false)), _d = _c.next(); !_d.done; _d = _c.next()) {
|
|
4549
|
-
var parameterName = _d.value;
|
|
4550
|
-
parameterNames.add(parameterName);
|
|
4551
|
-
}
|
|
4552
|
-
}
|
|
4553
|
-
catch (e_1_1) { e_1 = { error: e_1_1 }; }
|
|
4554
|
-
finally {
|
|
4555
|
-
try {
|
|
4556
|
-
if (_d && !_d.done && (_a = _c.return)) _a.call(_c);
|
|
4557
|
-
}
|
|
4558
|
-
finally { if (e_1) throw e_1.error; }
|
|
4559
|
-
}
|
|
4560
|
-
if (promptTemplate.blockType === 'SCRIPT') {
|
|
4561
|
-
try {
|
|
4562
|
-
for (var _e = __values(extractVariables(promptTemplate.content)), _f = _e.next(); !_f.done; _f = _e.next()) {
|
|
4563
|
-
var parameterName = _f.value;
|
|
4564
|
-
parameterNames.add(parameterName);
|
|
4565
|
-
}
|
|
4566
|
-
}
|
|
4567
|
-
catch (e_2_1) { e_2 = { error: e_2_1 }; }
|
|
4568
|
-
finally {
|
|
4569
|
-
try {
|
|
4570
|
-
if (_f && !_f.done && (_b = _e.return)) _b.call(_e);
|
|
4571
|
-
}
|
|
4572
|
-
finally { if (e_2) throw e_2.error; }
|
|
4573
|
-
}
|
|
4574
|
-
}
|
|
4575
|
-
return parameterNames;
|
|
4576
|
-
}
|
|
4577
|
-
/**
|
|
4578
|
-
* TODO: [๐ฃ] If script require contentLanguage
|
|
4579
|
-
*/
|
|
4580
|
-
|
|
4581
4833
|
/**
|
|
4582
4834
|
* Compile pipeline from string (markdown) format to JSON format synchronously
|
|
4583
4835
|
*
|
|
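The removed block above drops the deprecated `difference`, `union`, `extractParameters`, `extractVariables` and `extractParametersFromPromptTemplate` helpers from this bundle; the deprecation notes point at the native JavaScript Set methods. A short sketch of the native replacements for the two set helpers, assuming a runtime where `Set.prototype.union` exists (a spread-based fallback is shown for older runtimes):

```ts
// Sketch: what the deprecated set helpers map to.
const defined = new Set(['title', 'content']);
const used = new Set(['content', 'rules']);

// union(defined, used) -> native Set.prototype.union where available
const all = typeof (defined as any).union === 'function'
    ? ((defined as any).union(used) as Set<string>)
    : new Set([...defined, ...used]);

// difference(used, defined) -> parameters that are used but never defined
const missing = new Set([...used].filter((name) => !defined.has(name)));

console.log([...all]);     // ['title', 'content', 'rules']
console.log([...missing]); // ['rules']
```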
@@ -4687,7 +4939,7 @@ function pipelineStringToJsonSync(pipelineString) {
|
|
|
4687
4939
|
pipelineJson.pipelineUrl = command.pipelineUrl.href;
|
|
4688
4940
|
break;
|
|
4689
4941
|
case 'KNOWLEDGE':
|
|
4690
|
-
knowledgeCommandParser.applyToPipelineJson(pipelineJson,
|
|
4942
|
+
knowledgeCommandParser.applyToPipelineJson(command, { pipelineJson: pipelineJson, templateJson: null });
|
|
4691
4943
|
break;
|
|
4692
4944
|
case 'ACTION':
|
|
4693
4945
|
console.error(new NotYetImplementedError('Actions are not implemented yet'));
|
|
@@ -4696,7 +4948,7 @@ function pipelineStringToJsonSync(pipelineString) {
|
|
|
4696
4948
|
console.error(new NotYetImplementedError('Instruments are not implemented yet'));
|
|
4697
4949
|
break;
|
|
4698
4950
|
case 'PERSONA':
|
|
4699
|
-
personaCommandParser.applyToPipelineJson(pipelineJson,
|
|
4951
|
+
personaCommandParser.applyToPipelineJson(command, { pipelineJson: pipelineJson, templateJson: null });
|
|
4700
4952
|
// <- Note: Prototype of [๐ง] (remove this comment after full implementation)
|
|
4701
4953
|
break;
|
|
4702
4954
|
case 'BOILERPLATE':
|
|
@@ -4720,13 +4972,6 @@ function pipelineStringToJsonSync(pipelineString) {
|
|
|
4720
4972
|
// TODO: Parse prompt template description (the content out of the codeblock and lists)
|
|
4721
4973
|
var templateModelRequirements = __assign({}, defaultModelRequirements);
|
|
4722
4974
|
var listItems_3 = extractAllListItemsFromMarkdown(section.content);
|
|
4723
|
-
var dependentParameterNames = new Set();
|
|
4724
|
-
var blockType = 'PROMPT_TEMPLATE';
|
|
4725
|
-
var jokers = [];
|
|
4726
|
-
var postprocessing = [];
|
|
4727
|
-
var expectAmount = {};
|
|
4728
|
-
var expectFormat = undefined;
|
|
4729
|
-
var isBlockTypeSet = false;
|
|
4730
4975
|
var lastLine = section.content.split('\n').pop();
|
|
4731
4976
|
var resultingParameterNameMatch = /^->\s*\{(?<resultingParamName>[a-z0-9_]+)\}/im.exec(lastLine);
|
|
4732
4977
|
var resultingParameterName = null;
|
|
@@ -4747,10 +4992,39 @@ function pipelineStringToJsonSync(pipelineString) {
|
|
|
4747
4992
|
.join('\n')), "\n "); }));
|
|
4748
4993
|
};
|
|
4749
4994
|
var _e = extractOneBlockFromMarkdown(section.content), language = _e.language, content = _e.content;
|
|
4750
|
-
|
|
4751
|
-
|
|
4752
|
-
|
|
4753
|
-
|
|
4995
|
+
// TODO: [๐พ][1] DRY description
|
|
4996
|
+
var description_1 = section.content;
|
|
4997
|
+
// Note: Remove codeblocks - TODO: [๐พ]
|
|
4998
|
+
description_1 = description_1.split(/^```.*^```/gms).join('');
|
|
4999
|
+
description_1 = description_1.split(/^>.*$/gm).join('');
|
|
5000
|
+
//Note: Remove lists and return statement - TODO: [๐พ]
|
|
5001
|
+
description_1 = description_1.split(/^(?:(?:-)|(?:\d\))|(?:`?->))\s+.*$/gm).join('');
|
|
5002
|
+
description_1 = spaceTrim$1(description_1);
|
|
5003
|
+
if (description_1 === '') {
|
|
5004
|
+
description_1 = undefined;
|
|
5005
|
+
}
|
|
5006
|
+
var templateJson = {
|
|
5007
|
+
blockType: 'PROMPT_TEMPLATE',
|
|
5008
|
+
name: titleToName(section.title),
|
|
5009
|
+
title: section.title,
|
|
5010
|
+
description: description_1,
|
|
5011
|
+
modelRequirements: templateModelRequirements,
|
|
5012
|
+
content: content,
|
|
5013
|
+
};
|
|
5014
|
+
/**
|
|
5015
|
+
* This is nessesary because block type can be
|
|
5016
|
+
* - Set zero times, so anticipate 'PROMPT_TEMPLATE'
|
|
5017
|
+
* - Set one time
|
|
5018
|
+
* - Set more times - throw error
|
|
5019
|
+
*
|
|
5020
|
+
* Note: [2]
|
|
5021
|
+
*/
|
|
5022
|
+
var isBlockTypeSet = false;
|
|
5023
|
+
try {
|
|
5024
|
+
for (var listItems_2 = (e_3 = void 0, __values(listItems_3)), listItems_2_1 = listItems_2.next(); !listItems_2_1.done; listItems_2_1 = listItems_2.next()) {
|
|
5025
|
+
var listItem = listItems_2_1.value;
|
|
5026
|
+
var command = parseCommand(listItem, 'PIPELINE_TEMPLATE');
|
|
5027
|
+
// TODO [๐ง][โ๏ธ] List commands and before apply order them
|
|
4754
5028
|
switch (command.type) {
|
|
4755
5029
|
// TODO: [๐ง] Use here applyToPipelineJson and remove switch statement
|
|
4756
5030
|
case 'BLOCK':
|
|
@@ -4768,9 +5042,12 @@ function pipelineStringToJsonSync(pipelineString) {
|
|
|
4768
5042
|
return "continue-templates";
|
|
4769
5043
|
}
|
|
4770
5044
|
if (command.blockType === 'KNOWLEDGE') {
|
|
4771
|
-
knowledgeCommandParser.applyToPipelineJson(
|
|
5045
|
+
knowledgeCommandParser.applyToPipelineJson({
|
|
4772
5046
|
type: 'KNOWLEDGE',
|
|
4773
|
-
source: content, // <- TODO: [๐]
|
|
5047
|
+
source: content, // <- TODO: [๐] !!! Work with KNOWLEDGE which not referring to the source file or website, but its content itself
|
|
5048
|
+
}, {
|
|
5049
|
+
pipelineJson: pipelineJson,
|
|
5050
|
+
templateJson: templateJson,
|
|
4774
5051
|
});
|
|
4775
5052
|
return "continue-templates";
|
|
4776
5053
|
}
|
|
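The hunk above replaces the loose locals (`blockType`, `jokers`, `expectAmount`, ...) with a single `templateJson` object that later commands mutate in place. A loosely typed sketch of the shape it starts with before the per-template command loop runs; the field names follow the diff, the concrete values are placeholders:

```ts
// Sketch: the template object as initialized before per-template commands are applied.
interface TemplateJsonSketch {
    blockType: 'PROMPT_TEMPLATE' | 'SIMPLE_TEMPLATE' | 'SCRIPT' | 'PROMPT_DIALOG';
    name: string;
    title: string;
    description?: string;
    modelRequirements?: Record<string, unknown>;
    content: string;
    expectations?: Record<string, { min?: number; max?: number }>;
    expectFormat?: string;
    jokerParameterNames?: string[];
    postprocessingFunctionNames?: string[];
    personaName?: string | null;
    contentLanguage?: string;
    dependentParameterNames?: string[];
    resultingParameterName?: string;
}

const templateJson: TemplateJsonSketch = {
    blockType: 'PROMPT_TEMPLATE', // default until a BLOCK command overrides it
    name: 'example-template',     // titleToName(section.title) in the real code
    title: 'Example template',
    description: undefined,
    modelRequirements: { modelVariant: 'CHAT' },
    content: 'Write about {topic}',
};
```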
@@ -4783,35 +5060,36 @@ function pipelineStringToJsonSync(pipelineString) {
|
|
|
4783
5060
|
return "continue-templates";
|
|
4784
5061
|
}
|
|
4785
5062
|
expectResultingParameterName();
|
|
4786
|
-
blockType = command.blockType;
|
|
4787
|
-
isBlockTypeSet = true;
|
|
5063
|
+
templateJson.blockType = command.blockType;
|
|
5064
|
+
isBlockTypeSet = true; //<- Note: [2]
|
|
4788
5065
|
break;
|
|
4789
5066
|
case 'EXPECT_AMOUNT':
|
|
4790
5067
|
// eslint-disable-next-line no-case-declarations
|
|
4791
5068
|
var unit = command.unit.toLowerCase();
|
|
4792
|
-
|
|
5069
|
+
templateJson.expectations = templateJson.expectations || {};
|
|
5070
|
+
templateJson.expectations[unit] = templateJson.expectations[unit] || {};
|
|
4793
5071
|
if (command.sign === 'MINIMUM' || command.sign === 'EXACTLY') {
|
|
4794
|
-
if (
|
|
4795
|
-
throw new ParsingError("Already defined minumum ".concat(
|
|
5072
|
+
if (templateJson.expectations[unit].min !== undefined) {
|
|
5073
|
+
throw new ParsingError("Already defined minumum ".concat(templateJson.expectations[unit].min, " ").concat(command.unit.toLowerCase(), ", now trying to redefine it to ").concat(command.amount));
|
|
4796
5074
|
}
|
|
4797
|
-
|
|
5075
|
+
templateJson.expectations[unit].min = command.amount;
|
|
4798
5076
|
} /* not else */
|
|
4799
5077
|
if (command.sign === 'MAXIMUM' || command.sign === 'EXACTLY') {
|
|
4800
|
-
if (
|
|
4801
|
-
throw new ParsingError("Already defined maximum ".concat(
|
|
5078
|
+
if (templateJson.expectations[unit].max !== undefined) {
|
|
5079
|
+
throw new ParsingError("Already defined maximum ".concat(templateJson.expectations[unit].max, " ").concat(command.unit.toLowerCase(), ", now trying to redefine it to ").concat(command.amount));
|
|
4802
5080
|
}
|
|
4803
|
-
|
|
5081
|
+
templateJson.expectations[unit].max = command.amount;
|
|
4804
5082
|
}
|
|
4805
5083
|
break;
|
|
4806
5084
|
case 'EXPECT_FORMAT':
|
|
4807
|
-
if (expectFormat !== undefined && command.format !== expectFormat) {
|
|
4808
|
-
throw new ParsingError("Expect format is already defined to \"".concat(expectFormat, "\"
|
|
5085
|
+
if (templateJson.expectFormat !== undefined && command.format !== templateJson.expectFormat) {
|
|
5086
|
+
throw new ParsingError(spaceTrim$1("\n Expect format is already defined to \"".concat(templateJson.expectFormat, "\".\n Now you try to redefine it by \"").concat(command.format, "\".\n ")));
|
|
4809
5087
|
}
|
|
4810
|
-
expectFormat = command.format;
|
|
5088
|
+
templateJson.expectFormat = command.format;
|
|
4811
5089
|
break;
|
|
4812
5090
|
case 'JOKER':
|
|
4813
|
-
|
|
4814
|
-
|
|
5091
|
+
templateJson.jokerParameterNames = templateJson.jokerParameterNames || [];
|
|
5092
|
+
templateJson.jokerParameterNames.push(command.parameterName);
|
|
4815
5093
|
break;
|
|
4816
5094
|
case 'MODEL':
|
|
4817
5095
|
templateModelRequirements[command.key] = command.value;
|
|
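In the hunk above each `EXPECT_AMOUNT` command writes into `templateJson.expectations[unit].min`/`.max`, and redefining an already set bound raises a `ParsingError`. A small sketch of that accumulation (unit handling and the error class follow the diff; the function wrapper is illustrative):

```ts
// Sketch: folding EXPECT commands into the expectations object.
class ParsingError extends Error {}

type Sign = 'MINIMUM' | 'MAXIMUM' | 'EXACTLY';
type Expectations = Record<string, { min?: number; max?: number }>;

function applyExpectAmount(expectations: Expectations, unit: string, sign: Sign, amount: number): Expectations {
    const key = unit.toLowerCase();
    const bucket = (expectations[key] = expectations[key] || {});

    if (sign === 'MINIMUM' || sign === 'EXACTLY') {
        if (bucket.min !== undefined) {
            throw new ParsingError(`Already defined minimum ${bucket.min} ${key}, now trying to redefine it to ${amount}`);
        }
        bucket.min = amount;
    } /* not else */
    if (sign === 'MAXIMUM' || sign === 'EXACTLY') {
        if (bucket.max !== undefined) {
            throw new ParsingError(`Already defined maximum ${bucket.max} ${key}, now trying to redefine it to ${amount}`);
        }
        bucket.max = amount;
    }
    return expectations;
}

// applyExpectAmount({}, 'WORDS', 'EXACTLY', 50) -> { words: { min: 50, max: 50 } }
```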
@@ -4821,11 +5099,12 @@ function pipelineStringToJsonSync(pipelineString) {
|
|
|
4821
5099
|
defineParam(command);
|
|
4822
5100
|
break;
|
|
4823
5101
|
case 'POSTPROCESS':
|
|
4824
|
-
|
|
5102
|
+
templateJson.postprocessingFunctionNames = templateJson.postprocessingFunctionNames || [];
|
|
5103
|
+
templateJson.postprocessingFunctionNames.push(command.functionName);
|
|
4825
5104
|
break;
|
|
4826
5105
|
case 'KNOWLEDGE':
|
|
4827
5106
|
// TODO: [๐] The knowledge is maybe relevant for just this template
|
|
4828
|
-
knowledgeCommandParser.applyToPipelineJson(pipelineJson,
|
|
5107
|
+
+ knowledgeCommandParser.applyToPipelineJson(command, { pipelineJson: pipelineJson, templateJson: templateJson });
  break;
  case 'ACTION':
  // TODO: [๐] The action is maybe relevant for just this template
@@ -4836,7 +5115,7 @@ function pipelineStringToJsonSync(pipelineString) {
  console.error(new NotYetImplementedError('Instruments are not implemented yet'));
  break;
  case 'PERSONA':
- personaCommandParser.applyToPipelineJson(pipelineJson,
+ personaCommandParser.applyToPipelineJson(command, { pipelineJson: pipelineJson, templateJson: templateJson });
  // <- Note: Prototype of [๐ง] (remove this comment after full implementation)
  break;
  case 'BOILERPLATE':
@@ -4855,60 +5134,29 @@
  }
  finally { if (e_3) throw e_3.error; }
  }
-
+ // TODO: [๐ง] Should be done in BLOCK command
+ if (templateJson.blockType === 'SCRIPT') {
  if (!language) {
  throw new ParsingError('You must specify the language of the script in the prompt template');
  }
-
+ if (!SUPPORTED_SCRIPT_LANGUAGES.includes(language)) {
  throw new ParsingError(spaceTrim$1(function (block) { return "\n Script language ".concat(language, " is not supported.\n\n Supported languages are:\n ").concat(block(SUPPORTED_SCRIPT_LANGUAGES.join(', ')), "\n\n "); }));
  }
+ templateJson.contentLanguage = language;
  }
- // TODO: [
- var description_1 = section.content;
- // Note: Remove codeblocks - TODO: [๐พ]
- description_1 = description_1.split(/^```.*^```/gms).join('');
- description_1 = description_1.split(/^>.*$/gm).join('');
- //Note: Remove lists and return statement - TODO: [๐พ]
- description_1 = description_1.split(/^(?:(?:-)|(?:\d\))|(?:`?->))\s+.*$/gm).join('');
- description_1 = spaceTrim$1(description_1);
- if (description_1 === '') {
- description_1 = undefined;
- }
- if (Object.keys(jokers).length === 0) {
- jokers = undefined;
- }
- if (Object.keys(expectAmount).length === 0) {
- expectAmount = undefined;
- }
- if (Object.keys(postprocessing).length === 0) {
- postprocessing = undefined;
- }
- dependentParameterNames = union(dependentParameterNames, extractParametersFromPromptTemplate(__assign(__assign({}, section), { description: description_1, blockType: blockType, content: content })));
+ // TODO: [๐ง][โ] Should be done in BLOCK command
  if (templateModelRequirements.modelVariant === undefined) {
  templateModelRequirements.modelVariant = 'CHAT';
  }
- dependentParameterNames =
-
-
-
- description: description_1,
- dependentParameterNames: Array.from(dependentParameterNames),
- blockType: blockType,
- jokers: jokers,
- postprocessing: postprocessing,
- expectations: expectAmount,
- expectFormat: expectFormat,
- personaName: null,
- modelRequirements: templateModelRequirements,
- contentLanguage: blockType === 'SCRIPT' ? language : undefined,
- content: content,
- resultingParameterName: expectResultingParameterName( /* <- Note: This is once more redundant */),
- };
- if (blockType !== 'PROMPT_TEMPLATE') {
- delete template.modelRequirements;
+ templateJson.dependentParameterNames = Array.from(extractParametersFromPromptTemplate(templateJson));
+ // TODO: [๐ง][โ] Remove this condition - modelRequirements should be put here via BLOCK command not removed when PROMPT_TEMPLATE
+ if (templateJson.blockType !== 'PROMPT_TEMPLATE') {
+ delete templateJson.modelRequirements;
  }
+ // TODO: [๐ง] Make this better - for example each command parser can call and apply this
+ templateJson.resultingParameterName = expectResultingParameterName( /* <- Note: This is once more redundant */);
  // TODO: [๐ง] What actually about preparation and pushing the block into `promptTemplates`
- pipelineJson.promptTemplates.push(
+ pipelineJson.promptTemplates.push(templateJson);
  };
  try {
  // =============================================================
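Both command parsers in this hunk now receive the parsed command first and a bag of mutable drafts second: `applyToPipelineJson(command, { pipelineJson, templateJson })`. A minimal TypeScript sketch of that call shape; the type and field names below are illustrative, not the package's actual typings:

```ts
// Illustrative sketch only: the real parsers and PipelineJson/TemplateJson types are richer.
type PersonaCommand = { type: 'PERSONA'; personaName: string };

interface PipelineJsonDraft {
    personas: Array<{ name: string }>;
}

interface TemplateJsonDraft {
    personaName: string | null;
}

const personaCommandParserSketch = {
    applyToPipelineJson(
        command: PersonaCommand,
        subjects: { pipelineJson: PipelineJsonDraft; templateJson: TemplateJsonDraft },
    ): void {
        // The parser mutates the drafts in place instead of returning new objects
        const { pipelineJson, templateJson } = subjects;
        templateJson.personaName = command.personaName;
        if (!pipelineJson.personas.some(({ name }) => name === command.personaName)) {
            pipelineJson.personas.push({ name: command.personaName });
        }
    },
};

// Mirrors the call sites in the hunk above
const pipelineJson: PipelineJsonDraft = { personas: [] };
const templateJson: TemplateJsonDraft = { personaName: null };
personaCommandParserSketch.applyToPipelineJson(
    { type: 'PERSONA', personaName: 'Copywriter' },
    { pipelineJson, templateJson },
);
```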
@@ -5361,5 +5609,1086 @@ function listAllFiles(path, isRecursive) {
  * Note: [๐ข] This code should never be published outside of `@promptbook/node`
  */

-
+ /**
+ * This error type indicates that you try to use a feature that is not available in the current environment
+ */
+ var EnvironmentMismatchError = /** @class */ (function (_super) {
+ __extends(EnvironmentMismatchError, _super);
+ function EnvironmentMismatchError(message) {
+ var _this = _super.call(this, message) || this;
+ _this.name = 'EnvironmentMismatchError';
+ Object.setPrototypeOf(_this, EnvironmentMismatchError.prototype);
+ return _this;
+ }
+ return EnvironmentMismatchError;
+ }(Error));
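The same error class written as a native TypeScript class, plus one plausible way a Node-only helper can use it. The `assertRunningInNode` helper is an assumption for illustration; the bundle's own check is the `isRunningInNode()` call further below:

```ts
// Equivalent of the compiled class above, plus an assumed guard helper.
class EnvironmentMismatchError extends Error {
    public constructor(message: string) {
        super(message);
        this.name = 'EnvironmentMismatchError';
        Object.setPrototypeOf(this, EnvironmentMismatchError.prototype);
    }
}

function assertRunningInNode(featureName: string): void {
    // `process.versions.node` is only defined in Node.js runtimes
    if (typeof process === 'undefined' || !process.versions?.node) {
        throw new EnvironmentMismatchError(`${featureName} works only in Node.js environment`);
    }
}

assertRunningInNode('createCollectionFromDirectory');
```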
+
+ /**
+ * Helper of usage compute
+ *
+ * @param content the content of prompt or response
+ * @returns part of PromptResultUsageCounts
+ *
+ * @private internal util of LlmExecutionTools
+ */
+ function computeUsageCounts(content) {
+ return {
+ charactersCount: { value: countCharacters(content) },
+ wordsCount: { value: countWords(content) },
+ sentencesCount: { value: countSentences(content) },
+ linesCount: { value: countLines(content) },
+ paragraphsCount: { value: countParagraphs(content) },
+ pagesCount: { value: countPages(content) },
+ };
+ }
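What `computeUsageCounts` returns, sketched with naive stand-ins for the bundled `countCharacters`/`countWords`/... helpers; the real helpers, defined earlier in this bundle, may count differently:

```ts
type UsageCount = { value: number };

function computeUsageCountsSketch(content: string): Record<string, UsageCount> {
    // Naive counters standing in for countCharacters / countWords / countLines
    const countWords = (text: string) => text.split(/\s+/).filter(Boolean).length;
    const countLines = (text: string) => text.split('\n').length;
    return {
        charactersCount: { value: content.length },
        wordsCount: { value: countWords(content) },
        linesCount: { value: countLines(content) },
    };
}

console.log(computeUsageCountsSketch('Hello Promptbook!'));
// -> { charactersCount: { value: 17 }, wordsCount: { value: 2 }, linesCount: { value: 1 } }
```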
+
+ /**
+ * Make UncertainNumber
+ *
+ * @param value
+ *
+ * @private utility for initializating UncertainNumber
+ */
+ function uncertainNumber(value) {
+ if (value === null || value === undefined || Number.isNaN(NaN)) {
+ return { value: 0, isUncertain: true };
+ }
+ return { value: value };
+ }
+
+ /**
+ * Get current date in ISO 8601 format
+ *
+ * @private This is internal util of the promptbook
+ */
+ function getCurrentIsoDate() {
+ return new Date().toISOString();
+ }
+
+ /**
+ * Function computeUsage will create price per one token based on the string value found on openai page
+ *
+ * @private within the repository, used only as internal helper for `OPENAI_MODELS`
+ */
+ function computeUsage(value) {
+ var _a = __read(value.split(' / '), 2), price = _a[0], tokens = _a[1];
+ return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
+ }
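The pricing strings used throughout the model lists below collapse to a per-token price exactly as this function parses them; re-typed in TypeScript for clarity:

```ts
function computeUsage(value: string): number {
    const [price, tokens] = value.split(' / ');
    return parseFloat(price.replace('$', '')) / parseFloat(tokens.replace('M tokens', '')) / 1000000;
}

console.log(computeUsage('$15.00 / 1M tokens')); // 0.000015 USD per token
console.log(computeUsage('$0.25 / 1M tokens'));  // 2.5e-7 USD per token
```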
+
+ /**
+ * List of available Anthropic Claude models with pricing
+ *
+ * Note: Done at 2024-05-25
+ *
+ * @see https://docs.anthropic.com/en/docs/models-overview
+ */
+ var ANTHROPIC_CLAUDE_MODELS = [
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude 3 Opus',
+ modelName: 'claude-3-opus-20240229',
+ pricing: {
+ prompt: computeUsage("$15.00 / 1M tokens"),
+ output: computeUsage("$75.00 / 1M tokens"),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude 3 Sonnet',
+ modelName: 'claude-3-sonnet-20240229',
+ pricing: {
+ prompt: computeUsage("$3.00 / 1M tokens"),
+ output: computeUsage("$15.00 / 1M tokens"),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude 3 Haiku',
+ modelName: ' claude-3-haiku-20240307',
+ pricing: {
+ prompt: computeUsage("$0.25 / 1M tokens"),
+ output: computeUsage("$1.25 / 1M tokens"),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude 2.1',
+ modelName: 'claude-2.1',
+ pricing: {
+ prompt: computeUsage("$8.00 / 1M tokens"),
+ output: computeUsage("$24.00 / 1M tokens"),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'Claude 2',
+ modelName: 'claude-2.0',
+ pricing: {
+ prompt: computeUsage("$8.00 / 1M tokens"),
+ output: computeUsage("$24.00 / 1M tokens"),
+ },
+ },
+ {
+ modelVariant: 'CHAT',
+ modelTitle: ' Claude Instant 1.2',
+ modelName: 'claude-instant-1.2',
+ pricing: {
+ prompt: computeUsage("$0.80 / 1M tokens"),
+ output: computeUsage("$2.40 / 1M tokens"),
+ },
+ },
+ // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
+ ];
+ /**
+ * Note: [๐ค] Add models of new variant
+ * TODO: [๐ง ] !!! Add embedding models OR Anthropic has only chat+completion models?
+ * TODO: [๐ง ] Some mechanism to propagate unsureness
+ * TODO: [๐ง ][๐ฎโโ๏ธ] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
+ * TODO: [๐] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
+ */
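A back-of-the-envelope cost estimate using the table above; the per-token prices follow directly from `computeUsage`, while the token counts are invented:

```ts
// Claude 3 Opus from the list above: $15.00 / 1M prompt tokens, $75.00 / 1M output tokens
const opusPricing = { prompt: 15 / 1_000_000, output: 75 / 1_000_000 };

const inputTokens = 1_200; // example values, not measured
const outputTokens = 350;
const estimatedUsd = inputTokens * opusPricing.prompt + outputTokens * opusPricing.output;

console.log(estimatedUsd); // ≈ 0.04425 USD
```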
+
+ /**
+ * Execution Tools for calling Anthropic Claude API.
+ */
+ var AnthropicClaudeExecutionTools = /** @class */ (function () {
+ /**
+ * Creates Anthropic Claude Execution Tools.
+ *
+ * @param options which are relevant are directly passed to the Anthropic Claude client
+ */
+ function AnthropicClaudeExecutionTools(options) {
+ if (options === void 0) { options = {}; }
+ this.options = options;
+ // Note: Passing only Anthropic Claude relevant options to Anthropic constructor
+ var anthropicOptions = __assign({}, options);
+ delete anthropicOptions.isVerbose;
+ this.client = new Anthropic(anthropicOptions);
+ }
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
+ get: function () {
+ return 'Anthropic Claude';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "description", {
+ get: function () {
+ return 'Use all models provided by Anthropic Claude';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ /**
+ * Calls Anthropic Claude API to use a chat model.
+ */
+ AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var content, parameters, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ if (this.options.isVerbose) {
+ console.info('๐ฌ Anthropic Claude callChatModel call');
+ }
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ // TODO: [โ] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'CHAT') {
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
+ }
+ rawRequest = {
+ model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
+ max_tokens: modelRequirements.maxTokens || 4096,
+ // <- TODO: [๐พ] Make some global max cap for maxTokens
+ temperature: modelRequirements.temperature,
+ system: modelRequirements.systemMessage,
+ // <- TODO: [๐] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
+ // <- Note: [๐ง]
+ messages: [
+ {
+ role: 'user',
+ content: replaceParameters(content, parameters),
+ },
+ ],
+ // TODO: Is here some equivalent of user identification?> user: this.options.user,
+ };
+ start = getCurrentIsoDate();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ return [4 /*yield*/, this.client.messages.create(rawRequest)];
+ case 1:
+ rawResponse = _a.sent();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ if (!rawResponse.content[0]) {
+ throw new PipelineExecutionError('No content from Anthropic Claude');
+ }
+ if (rawResponse.content.length > 1) {
+ throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
+ }
+ resultContent = rawResponse.content[0].text;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ usage = {
+ price: { value: 0, isUncertain: true } /* <- TODO: [๐] Compute usage */,
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
+ output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
+ };
+ return [2 /*return*/, {
+ content: resultContent,
+ modelName: rawResponse.model,
+ timing: {
+ start: start,
+ complete: complete,
+ },
+ usage: usage,
+ rawResponse: rawResponse,
+ // <- [๐คนโโ๏ธ]
+ }];
+ }
+ });
+ });
+ };
+ /*
+ TODO: [๐]
+ public async callCompletionModel(
+ prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
+ ): Promise<PromptCompletionResult> {
+
+ if (this.options.isVerbose) {
+ console.info('๐ Anthropic Claude callCompletionModel call');
+ }
+
+ const { content, parameters, modelRequirements } = prompt;
+
+ // TODO: [โ] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
+ }
+
+ const model = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ const modelSettings = {
+ model: rawResponse.model || model,
+ max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
+ // <- TODO: [๐พ] Make some global max cap for maxTokens
+ // <- TODO: Use here `systemMessage`, `temperature` and `seed`
+ };
+
+ const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
+ ...modelSettings,
+ prompt: replaceParameters(content, parameters),
+ user: this.options.user,
+ };
+ const start: string_date_iso8601 = getCurrentIsoDate();
+ let complete: string_date_iso8601;
+
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ const rawResponse = await this.client.completions.create(rawRequest);
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError('No choises from Anthropic Claude');
+ }
+
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError('More than one choise from Anthropic Claude');
+ }
+
+ const resultContent = rawResponse.choices[0].text;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ const usage = { price: 'UNKNOWN', inputTokens: 0, outputTokens: 0 /* <- TODO: [๐] Compute usage * / } satisfies PromptResultUsage;
+
+
+
+ return {
+ content: resultContent,
+ modelName: rawResponse.model || model,
+ timing: {
+ start,
+ complete,
+ },
+ usage,
+ rawResponse,
+ // <- [๐คนโโ๏ธ]
+ };
+ }
+ */
+ // <- Note: [๐ค] callXxxModel
+ /**
+ * Get the model that should be used as default
+ */
+ AnthropicClaudeExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
+ var model = ANTHROPIC_CLAUDE_MODELS.find(function (_a) {
+ var modelName = _a.modelName;
+ return modelName.startsWith(defaultModelName);
+ });
+ if (model === undefined) {
+ throw new UnexpectedError(spaceTrim(function (block) {
+ return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(ANTHROPIC_CLAUDE_MODELS.map(function (_a) {
+ var modelName = _a.modelName;
+ return "- \"".concat(modelName, "\"");
+ }).join('\n')), "\n\n ");
+ }));
+ }
+ return model;
+ };
+ /**
+ * Default model for chat variant.
+ */
+ AnthropicClaudeExecutionTools.prototype.getDefaultChatModel = function () {
+ return this.getDefaultModel('claude-3-opus');
+ };
+ // <- Note: [๐ค] getDefaultXxxModel
+ /**
+ * List all available Anthropic Claude models that can be used
+ */
+ AnthropicClaudeExecutionTools.prototype.listModels = function () {
+ return ANTHROPIC_CLAUDE_MODELS;
+ };
+ return AnthropicClaudeExecutionTools;
+ }());
+ /**
+ * TODO: !!!! [๐] JSON mode
+ * TODO: [๐ง ] Maybe handle errors via transformAnthropicError (like transformAzureError)
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
+ * TODO: Maybe make custom OpenaiError
+ * TODO: [๐ง ][๐] Maybe use `isDeterministic` from options
+ */
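A hedged usage sketch for the class above. Only the options and prompt fields that the constructor and `callChatModel` actually read are shown; the import specifier is an assumption (inside this bundle the class is simply in scope), and the full `Prompt` type may require more fields than the sketch passes:

```ts
// Assumed entry point; within this bundle AnthropicClaudeExecutionTools is defined above.
import { AnthropicClaudeExecutionTools } from '@promptbook/anthropic-claude';

async function main() {
    const tools = new AnthropicClaudeExecutionTools({
        apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY!,
        isVerbose: true,
    });

    // Reduced to the fields callChatModel reads: content, parameters, modelRequirements
    const prompt: any = {
        content: 'Write a one-line greeting for {name}.',
        parameters: { name: 'Pavol' },
        modelRequirements: {
            modelVariant: 'CHAT',
            // modelName omitted, so the default chat model ('claude-3-opus...') is used
        },
    };

    const result = await tools.callChatModel(prompt);
    console.log(result.content);
    console.log(result.usage.input.tokensCount, result.usage.output.tokensCount);
}

main().catch(console.error);
```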
+
+ /**
+ * List of available OpenAI models with pricing
+ *
+ * Note: Done at 2024-05-20
+ *
+ * @see https://platform.openai.com/docs/models/
+ * @see https://openai.com/api/pricing/
+ */
+ var OPENAI_MODELS = [
+ /*/
+ {
+ modelTitle: 'dall-e-3',
+ modelName: 'dall-e-3',
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'whisper-1',
+ modelName: 'whisper-1',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'davinci-002',
+ modelName: 'davinci-002',
+ pricing: {
+ prompt: computeUsage("$2.00 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"), // <- not sure
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'dall-e-2',
+ modelName: 'dall-e-2',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-16k',
+ modelName: 'gpt-3.5-turbo-16k',
+ pricing: {
+ prompt: computeUsage("$3.00 / 1M tokens"),
+ output: computeUsage("$4.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1-hd-1106',
+ modelName: 'tts-1-hd-1106',
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1-hd',
+ modelName: 'tts-1-hd',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4',
+ modelName: 'gpt-4',
+ pricing: {
+ prompt: computeUsage("$30.00 / 1M tokens"),
+ output: computeUsage("$60.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-32k',
+ modelName: 'gpt-4-32k',
+ pricing: {
+ prompt: computeUsage("$60.00 / 1M tokens"),
+ output: computeUsage("$120.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-0613',
+ modelName: 'gpt-4-0613',
+ pricing: {
+ prompt: computeUsage(` / 1M tokens`),
+ output: computeUsage(` / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-turbo-2024-04-09',
+ modelName: 'gpt-4-turbo-2024-04-09',
+ pricing: {
+ prompt: computeUsage("$10.00 / 1M tokens"),
+ output: computeUsage("$30.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-1106',
+ modelName: 'gpt-3.5-turbo-1106',
+ pricing: {
+ prompt: computeUsage("$1.00 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-turbo',
+ modelName: 'gpt-4-turbo',
+ pricing: {
+ prompt: computeUsage("$10.00 / 1M tokens"),
+ output: computeUsage("$30.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'gpt-3.5-turbo-instruct-0914',
+ modelName: 'gpt-3.5-turbo-instruct-0914',
+ pricing: {
+ prompt: computeUsage("$1.50 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"), // <- For gpt-3.5-turbo-instruct
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'gpt-3.5-turbo-instruct',
+ modelName: 'gpt-3.5-turbo-instruct',
+ pricing: {
+ prompt: computeUsage("$1.50 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1',
+ modelName: 'tts-1',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo',
+ modelName: 'gpt-3.5-turbo',
+ pricing: {
+ prompt: computeUsage("$3.00 / 1M tokens"),
+ output: computeUsage("$6.00 / 1M tokens"), // <- Not sure, refer to gpt-3.5-turbo in Fine-tuning models
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-0301',
+ modelName: 'gpt-3.5-turbo-0301',
+ pricing: {
+ prompt: computeUsage("$1.50 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'COMPLETION',
+ modelTitle: 'babbage-002',
+ modelName: 'babbage-002',
+ pricing: {
+ prompt: computeUsage("$0.40 / 1M tokens"),
+ output: computeUsage("$0.40 / 1M tokens"), // <- Not sure
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-1106-preview',
+ modelName: 'gpt-4-1106-preview',
+ pricing: {
+ prompt: computeUsage("$10.00 / 1M tokens"),
+ output: computeUsage("$30.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-0125-preview',
+ modelName: 'gpt-4-0125-preview',
+ pricing: {
+ prompt: computeUsage("$10.00 / 1M tokens"),
+ output: computeUsage("$30.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /*/
+ {
+ modelTitle: 'tts-1-1106',
+ modelName: 'tts-1-1106',
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-0125',
+ modelName: 'gpt-3.5-turbo-0125',
+ pricing: {
+ prompt: computeUsage("$0.50 / 1M tokens"),
+ output: computeUsage("$1.50 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-turbo-preview',
+ modelName: 'gpt-4-turbo-preview',
+ pricing: {
+ prompt: computeUsage("$10.00 / 1M tokens"),
+ output: computeUsage("$30.00 / 1M tokens"), // <- Not sure, just for gpt-4-turbo
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'EMBEDDING',
+ modelTitle: 'text-embedding-3-large',
+ modelName: 'text-embedding-3-large',
+ pricing: {
+ prompt: computeUsage("$0.13 / 1M tokens"),
+ // TODO: [๐] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+ output: 0, // <- Note: [๐] In Embedding models you dont pay for output
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'EMBEDDING',
+ modelTitle: 'text-embedding-3-small',
+ modelName: 'text-embedding-3-small',
+ pricing: {
+ prompt: computeUsage("$0.02 / 1M tokens"),
+ // TODO: [๐] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+ output: 0, // <- Note: [๐] In Embedding models you dont pay for output
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-0613',
+ modelName: 'gpt-3.5-turbo-0613',
+ pricing: {
+ prompt: computeUsage("$1.50 / 1M tokens"),
+ output: computeUsage("$2.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'EMBEDDING',
+ modelTitle: 'text-embedding-ada-002',
+ modelName: 'text-embedding-ada-002',
+ pricing: {
+ prompt: computeUsage("$0.1 / 1M tokens"),
+ // TODO: [๐] Leverage the batch API @see https://platform.openai.com/docs/guides/batch
+ output: 0, // <- Note: [๐] In Embedding models you dont pay for output
+ },
+ },
+ /**/
+ /*/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-1106-vision-preview',
+ modelName: 'gpt-4-1106-vision-preview',
+ },
+ /**/
+ /*/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4-vision-preview',
+ modelName: 'gpt-4-vision-preview',
+ pricing: {
+ prompt: computeUsage(`$10.00 / 1M tokens`),
+ output: computeUsage(`$30.00 / 1M tokens`),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4o-2024-05-13',
+ modelName: 'gpt-4o-2024-05-13',
+ pricing: {
+ prompt: computeUsage("$5.00 / 1M tokens"),
+ output: computeUsage("$15.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-4o',
+ modelName: 'gpt-4o',
+ pricing: {
+ prompt: computeUsage("$5.00 / 1M tokens"),
+ output: computeUsage("$15.00 / 1M tokens"),
+ },
+ },
+ /**/
+ /**/
+ {
+ modelVariant: 'CHAT',
+ modelTitle: 'gpt-3.5-turbo-16k-0613',
+ modelName: 'gpt-3.5-turbo-16k-0613',
+ pricing: {
+ prompt: computeUsage("$3.00 / 1M tokens"),
+ output: computeUsage("$4.00 / 1M tokens"),
+ },
+ },
+ /**/
+ ];
+ /**
+ * Note: [๐ค] Add models of new variant
+ * TODO: [๐ง ] Some mechanism to propagate unsureness
+ * TODO: [๐][๐ฎโโ๏ธ] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
+ * TODO: [๐ง ][๐ฎโโ๏ธ] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
+ * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
+ * @see https://openai.com/api/pricing/
+ * @see /other/playground/playground.ts
+ * TODO: [๐] Make better
+ * TODO: Change model titles to human eg: "gpt-4-turbo-2024-04-09" -> "GPT-4 Turbo (2024-04-09)"
+ * TODO: [๐ธ] Not all models are compatible with JSON mode, add this information here and use it
+ */
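In the list above, an entry opened with `/*/` is commented out until the next `/**/`, while an entry sitting between two `/**/` markers is active. Looking a model up in a table shaped like `OPENAI_MODELS` and estimating the cost of one call (token counts below are invented):

```ts
type ModelListing = {
    modelVariant?: 'CHAT' | 'COMPLETION' | 'EMBEDDING';
    modelTitle: string;
    modelName: string;
    pricing?: { prompt: number; output: number };
};

// One active entry copied from the list above (prices as computeUsage would produce them)
const models: ModelListing[] = [
    {
        modelVariant: 'CHAT',
        modelTitle: 'gpt-4o',
        modelName: 'gpt-4o',
        pricing: { prompt: 5 / 1_000_000, output: 15 / 1_000_000 },
    },
];

const model = models.find(({ modelName }) => modelName === 'gpt-4o');
if (model?.pricing) {
    const usd = 900 * model.pricing.prompt + 250 * model.pricing.output; // invented token counts
    console.log(`gpt-4o call ≈ $${usd.toFixed(5)}`); // ≈ $0.00825
}
```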
+
+ /**
+ * Computes the usage of the OpenAI API based on the response from OpenAI
+ *
+ * @param promptContent The content of the prompt
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
+ * @param rawResponse The raw response from OpenAI API
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from OpenAI
+ * @private internal util of `OpenAiExecutionTools`
+ */
+ function computeOpenaiUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
+ resultContent, rawResponse) {
+ var _a, _b;
+ if (rawResponse.usage === undefined) {
+ throw new PipelineExecutionError('The usage is not defined in the response from OpenAI');
+ }
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.prompt_tokens) === undefined) {
+ throw new PipelineExecutionError('In OpenAI response `usage.prompt_tokens` not defined');
+ }
+ var inputTokens = rawResponse.usage.prompt_tokens;
+ var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completion_tokens) || 0;
+ var modelInfo = OPENAI_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
+ var price;
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
+ price = uncertainNumber();
+ }
+ else {
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
+ }
+ return {
+ price: price,
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.prompt_tokens) }, computeUsageCounts(promptContent)),
+ output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
+ };
+ }
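The arithmetic behind `computeOpenaiUsage`, traced by hand with a mocked response object; the object is a stand-in for the real OpenAI payload and carries only the fields the function reads:

```ts
const mockedRawResponse = {
    model: 'gpt-3.5-turbo-0125',
    usage: { prompt_tokens: 1000, completion_tokens: 500 },
};

// gpt-3.5-turbo-0125 in OPENAI_MODELS above: $0.50 / 1M prompt tokens, $1.50 / 1M output tokens
const price =
    mockedRawResponse.usage.prompt_tokens * (0.5 / 1_000_000) +
    mockedRawResponse.usage.completion_tokens * (1.5 / 1_000_000);

console.log(price.toFixed(5)); // "0.00125" <- the number computeOpenaiUsage feeds into uncertainNumber(...)
```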
+
+ /**
+ * Execution Tools for calling OpenAI API.
+ */
+ var OpenAiExecutionTools = /** @class */ (function () {
+ /**
+ * Creates OpenAI Execution Tools.
+ *
+ * @param options which are relevant are directly passed to the OpenAI client
+ */
+ function OpenAiExecutionTools(options) {
+ if (options === void 0) { options = {}; }
+ this.options = options;
+ // Note: Passing only OpenAI relevant options to OpenAI constructor
+ var openAiOptions = __assign({}, options);
+ delete openAiOptions.isVerbose;
+ delete openAiOptions.user;
+ this.client = new OpenAI(__assign({}, openAiOptions));
+ }
+ Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
+ get: function () {
+ return 'OpenAI';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ Object.defineProperty(OpenAiExecutionTools.prototype, "description", {
+ get: function () {
+ return 'Use all models provided by OpenAI';
+ },
+ enumerable: false,
+ configurable: true
+ });
+ /**
+ * Calls OpenAI API to use a chat model.
+ */
+ OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var content, parameters, modelRequirements, expectFormat, model, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ if (this.options.isVerbose) {
+ console.info('๐ฌ OpenAI callChatModel call', { prompt: prompt });
+ }
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements, expectFormat = prompt.expectFormat;
+ // TODO: [โ] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'CHAT') {
+ throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
+ }
+ model = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ modelSettings = {
+ model: model,
+ max_tokens: modelRequirements.maxTokens,
+ // <- TODO: [๐พ] Make some global max cap for maxTokens
+ temperature: modelRequirements.temperature,
+ // <- TODO: [๐] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
+ // <- Note: [๐ง]
+ };
+ if (expectFormat === 'JSON') {
+ modelSettings.response_format = {
+ type: 'json_object',
+ };
+ }
+ rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
+ ? []
+ : [
+ {
+ role: 'system',
+ content: modelRequirements.systemMessage,
+ },
+ ])), false), [
+ {
+ role: 'user',
+ content: replaceParameters(content, parameters),
+ },
+ ], false), user: this.options.user });
+ start = getCurrentIsoDate();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ return [4 /*yield*/, this.client.chat.completions.create(rawRequest)];
+ case 1:
+ rawResponse = _a.sent();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError('No choises from OpenAI');
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError('More than one choise from OpenAI');
+ }
+ resultContent = rawResponse.choices[0].message.content;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
+ if (resultContent === null) {
+ throw new PipelineExecutionError('No response message from OpenAI');
+ }
+ return [2 /*return*/, {
+ content: resultContent,
+ modelName: rawResponse.model || model,
+ timing: {
+ start: start,
+ complete: complete,
+ },
+ usage: usage,
+ rawResponse: rawResponse,
+ // <- [๐คนโโ๏ธ]
+ }];
+ }
+ });
+ });
+ };
+ /**
+ * Calls OpenAI API to use a complete model.
+ */
+ OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var content, parameters, modelRequirements, model, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ if (this.options.isVerbose) {
+ console.info('๐ OpenAI callCompletionModel call', { prompt: prompt });
+ }
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ // TODO: [โ] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'COMPLETION') {
+ throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
+ }
+ model = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
+ modelSettings = {
+ model: model,
+ max_tokens: modelRequirements.maxTokens || 2000,
+ // <- TODO: [๐พ] Make some global max cap for maxTokens
+ temperature: modelRequirements.temperature,
+ // <- TODO: [๐] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
+ // <- Note: [๐ง]
+ };
+ rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, parameters), user: this.options.user });
+ start = getCurrentIsoDate();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ return [4 /*yield*/, this.client.completions.create(rawRequest)];
+ case 1:
+ rawResponse = _a.sent();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError('No choises from OpenAI');
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError('More than one choise from OpenAI');
+ }
+ resultContent = rawResponse.choices[0].text;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
+ return [2 /*return*/, {
+ content: resultContent,
+ modelName: rawResponse.model || model,
+ timing: {
+ start: start,
+ complete: complete,
+ },
+ usage: usage,
+ rawResponse: rawResponse,
+ // <- [๐คนโโ๏ธ]
+ }];
+ }
+ });
+ });
+ };
+ /**
+ * Calls OpenAI API to use a embedding model
+ */
+ OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
+ return __awaiter(this, void 0, void 0, function () {
+ var content, parameters, modelRequirements, model, rawRequest, start, complete, rawResponse, resultContent, usage;
+ return __generator(this, function (_a) {
+ switch (_a.label) {
+ case 0:
+ if (this.options.isVerbose) {
+ console.info('๐ OpenAI embedding call', { prompt: prompt });
+ }
+ content = prompt.content, parameters = prompt.parameters, modelRequirements = prompt.modelRequirements;
+ // TODO: [โ] Use here more modelRequirements
+ if (modelRequirements.modelVariant !== 'EMBEDDING') {
+ throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
+ }
+ model = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
+ rawRequest = {
+ input: replaceParameters(content, parameters),
+ model: model,
+ // TODO: !!!! Test model 3 and dimensions
+ };
+ start = getCurrentIsoDate();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ return [4 /*yield*/, this.client.embeddings.create(rawRequest)];
+ case 1:
+ rawResponse = _a.sent();
+ if (this.options.isVerbose) {
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ if (rawResponse.data.length !== 1) {
+ throw new PipelineExecutionError("Expected exactly 1 data item in response, got ".concat(rawResponse.data.length));
+ }
+ resultContent = rawResponse.data[0].embedding;
+ // eslint-disable-next-line prefer-const
+ complete = getCurrentIsoDate();
+ usage = computeOpenaiUsage(content, '', rawResponse);
+ return [2 /*return*/, {
+ content: resultContent,
+ modelName: rawResponse.model || model,
+ timing: {
+ start: start,
+ complete: complete,
+ },
+ usage: usage,
+ rawResponse: rawResponse,
+ // <- [๐คนโโ๏ธ]
+ }];
+ }
+ });
+ });
+ };
+ // <- Note: [๐ค] callXxxModel
+ /**
+ * Get the model that should be used as default
+ */
+ OpenAiExecutionTools.prototype.getDefaultModel = function (defaultModelName) {
+ var model = OPENAI_MODELS.find(function (_a) {
+ var modelName = _a.modelName;
+ return modelName === defaultModelName;
+ });
+ if (model === undefined) {
+ throw new UnexpectedError(spaceTrim(function (block) {
+ return "\n Cannot find model in OpenAI models with name \"".concat(defaultModelName, "\" which should be used as default.\n\n Available models:\n ").concat(block(OPENAI_MODELS.map(function (_a) {
+ var modelName = _a.modelName;
+ return "- \"".concat(modelName, "\"");
+ }).join('\n')), "\n\n ");
+ }));
+ }
+ return model;
+ };
+ /**
+ * Default model for chat variant.
+ */
+ OpenAiExecutionTools.prototype.getDefaultChatModel = function () {
+ return this.getDefaultModel('gpt-4o');
+ };
+ /**
+ * Default model for completion variant.
+ */
+ OpenAiExecutionTools.prototype.getDefaultCompletionModel = function () {
+ return this.getDefaultModel('gpt-3.5-turbo-instruct');
+ };
+ /**
+ * Default model for completion variant.
+ */
+ OpenAiExecutionTools.prototype.getDefaultEmbeddingModel = function () {
+ return this.getDefaultModel('text-embedding-3-large');
+ };
+ // <- Note: [๐ค] getDefaultXxxModel
+ /**
+ * List all available OpenAI models that can be used
+ */
+ OpenAiExecutionTools.prototype.listModels = function () {
+ /*
+ Note: Dynamic lising of the models
+ const models = await this.openai.models.list({});
+
+ console.log({ models });
+ console.log(models.data);
+ */
+ return OPENAI_MODELS;
+ };
+ return OpenAiExecutionTools;
+ }());
+ /**
+ * TODO: [๐ง ][๐งโโ๏ธ] Maybe there can be some wizzard for thoose who want to use just OpenAI
+ * TODO: Maybe Create some common util for callChatModel and callCompletionModel
+ * TODO: Maybe make custom OpenaiError
+ * TODO: [๐ง ][๐] Maybe use `isDeterministic` from options
+ */
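A hedged usage sketch for the class above, mirroring the Anthropic example earlier: options and prompt fields are limited to what the constructor and call methods read, the import specifier is an assumption, and the prompts are typed loosely because the real `Prompt` type is stricter:

```ts
// Assumed entry point; within this bundle OpenAiExecutionTools is defined above.
import { OpenAiExecutionTools } from '@promptbook/openai';

async function main() {
    const tools = new OpenAiExecutionTools({
        apiKey: process.env.OPENAI_API_KEY!,
        user: 'example-user-id', // forwarded as the `user` field of each request
        isVerbose: false,
    });

    const chatPrompt: any = {
        content: 'Summarize {topic} in one sentence.',
        parameters: { topic: 'the Promptbook project' },
        modelRequirements: { modelVariant: 'CHAT' }, // 'gpt-4o' is the default chat model above
        // expectFormat: 'JSON', // <- uncomment to request response_format json_object
    };
    const chatResult = await tools.callChatModel(chatPrompt);
    console.log(chatResult.content, chatResult.usage.price);

    const embeddingPrompt: any = {
        content: 'Promptbook pipelines',
        parameters: {},
        modelRequirements: { modelVariant: 'EMBEDDING' }, // 'text-embedding-3-large' by default
    };
    const embeddingResult = await tools.callEmbeddingModel(embeddingPrompt);
    console.log((embeddingResult.content as number[]).length); // embedding vector length
}

main().catch(console.error);
```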
+
+ /**
+ * @@@
+ *
+ * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
+ *
+ * It looks for environment variables:
+ * - `process.env.OPENAI_API_KEY`
+ * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
+ *
+ * @returns @@@
+ */
+ function createLlmToolsFromEnv(options) {
+ if (options === void 0) { options = {}; }
+ if (!isRunningInNode()) {
+ throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
+ }
+ var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+ var llmTools = [];
+ if (typeof process.env.OPENAI_API_KEY === 'string') {
+ llmTools.push(new OpenAiExecutionTools({
+ isVerbose: isVerbose,
+ apiKey: process.env.OPENAI_API_KEY,
+ }));
+ }
+ if (typeof process.env.ANTHROPIC_CLAUDE_API_KEY === 'string') {
+ llmTools.push(new AnthropicClaudeExecutionTools({
+ isVerbose: isVerbose,
+ apiKey: process.env.ANTHROPIC_CLAUDE_API_KEY,
+ }));
+ }
+ if (llmTools.length === 0) {
+ throw new Error(spaceTrim("\n No LLM tools found in the environment\n\n Please set one of environment variables:\n - OPENAI_API_KEY\n - ANTHROPIC_CLAUDE_API_KEY\n "));
+ }
+ else if (llmTools.length === 1) {
+ return llmTools[0];
+ }
+ else {
+ return joinLlmExecutionTools.apply(void 0, __spreadArray([], __read(llmTools), false));
+ }
+ }
+ /**
+ * TODO: [๐ผ] !!! Export via `@promptbook/node`
+ * TODO: @@@ write discussion about this - wizzard
+ * TODO: Add Azure
+ * TODO: [๐ง ] Which name is better `createLlmToolsFromEnv` or `createLlmToolsFromEnvironment`?
+ * TODO: [๐ง ] Is there some meaningfull way how to test this util
+ * TODO: [๐ง ] Maybe pass env as argument
+ * Note: [๐ข] This code should never be published outside of `@promptbook/node`
+ */
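Intended use of `createLlmToolsFromEnv`, which is exported at the bottom of this file from `@promptbook/node`. It throws outside Node.js, picks providers from `OPENAI_API_KEY` and/or `ANTHROPIC_CLAUDE_API_KEY`, and joins them when both are present; the prompt below is again reduced to the fields the call methods read:

```ts
import { createLlmToolsFromEnv } from '@promptbook/node';

async function main() {
    // Requires OPENAI_API_KEY and/or ANTHROPIC_CLAUDE_API_KEY to be set
    const llmTools = createLlmToolsFromEnv({ isVerbose: true });
    console.log(llmTools.title); // 'OpenAI', 'Anthropic Claude', or the joined tools' title

    const result = await llmTools.callChatModel!({
        content: 'Say hello to {name}.',
        parameters: { name: 'world' },
        modelRequirements: { modelVariant: 'CHAT' },
    } as any /* <- the real Prompt type likely requires more fields */);

    console.log(result.content);
}

main().catch(console.error);
```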
+
+ export { PROMPTBOOK_VERSION, createCollectionFromDirectory, createLlmToolsFromEnv };
  //# sourceMappingURL=index.es.js.map