@promptbook/cli 0.61.0-23 → 0.61.0-25
This diff reflects the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and shows the changes between those versions as they appear in the public registry.
- package/README.md +3 -3
- package/esm/index.es.js +191 -91
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/config.d.ts +5 -1
- package/esm/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
- package/esm/typings/src/conversion/utils/extractParametersFromPromptTemplate.d.ts +1 -1
- package/esm/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -0
- package/esm/typings/src/execution/PromptResultUsage.d.ts +3 -0
- package/esm/typings/src/execution/createPipelineExecutor.d.ts +2 -0
- package/esm/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
- package/esm/typings/src/prepare/preparePipeline.d.ts +0 -1
- package/esm/typings/src/prepare/prepareTemplates.d.ts +31 -0
- package/esm/typings/src/prepare/unpreparePipeline.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +3 -0
- package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +3 -0
- package/esm/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts +3 -0
- package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/PromptDialogJson.d.ts +4 -3
- package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +10 -0
- package/esm/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +1 -0
- package/esm/typings/src/types/PipelineJson/ScriptJson.d.ts +5 -2
- package/esm/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +5 -2
- package/esm/typings/src/utils/extractParameters.d.ts +3 -0
- package/package.json +2 -2
- package/umd/index.umd.js +191 -91
- package/umd/index.umd.js.map +1 -1
- package/umd/typings/src/config.d.ts +5 -1
- package/umd/typings/src/conversion/pipelineStringToJsonSync.d.ts +1 -0
- package/umd/typings/src/conversion/utils/extractParametersFromPromptTemplate.d.ts +1 -1
- package/umd/typings/src/conversion/utils/stringifyPipelineJson.d.ts +1 -0
- package/umd/typings/src/execution/PromptResultUsage.d.ts +3 -0
- package/umd/typings/src/execution/createPipelineExecutor.d.ts +2 -0
- package/umd/typings/src/knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown.d.ts +1 -1
- package/umd/typings/src/prepare/preparePipeline.d.ts +0 -1
- package/umd/typings/src/prepare/prepareTemplates.d.ts +31 -0
- package/umd/typings/src/prepare/unpreparePipeline.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +3 -0
- package/umd/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +3 -0
- package/umd/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/PersonaJson.d.ts +3 -0
- package/umd/typings/src/types/PipelineJson/PipelineJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/PromptDialogJson.d.ts +4 -3
- package/umd/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +10 -0
- package/umd/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +1 -0
- package/umd/typings/src/types/PipelineJson/ScriptJson.d.ts +5 -2
- package/umd/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +5 -2
- package/umd/typings/src/utils/extractParameters.d.ts +3 -0
package/umd/index.umd.js
CHANGED
@@ -154,7 +154,7 @@
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.61.0-
+var PROMPTBOOK_VERSION = '0.61.0-24';
 // TODO: !!!! List here all the versions and annotate + put into script

 /**

@@ -291,6 +291,7 @@
  * The names of the parameters that are reserved for special purposes
  */
 var RESERVED_PARAMETER_NAMES = deepFreeze([
+    'content',
     'context',
     'knowledge',
     'samples',

@@ -303,6 +304,10 @@
  * @@@
  */
 var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
+/**
+ * @@@
+ */
+var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
 /*
 TODO: !!! Just testing false-negative detection of [🟡][🟢][🔵][⚪] leak
 */
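The two hunks above reserve `{content}` as a parameter name and add a `RESTRICTED` sentinel next to the existing `MISSING` one. A minimal sketch of the sentinel pattern (the nonce value is stubbed here; the package uses its own internal `REPLACING_NONCE`, and `checkReservedValue` is a hypothetical helper standing in for the inline check added around line 2209 later in this diff):

```js
// Minimal sketch of the reserved-parameter sentinels, assuming a stub nonce.
const REPLACING_NONCE = 'example-nonce'; // <- stub; the real package uses an internal nonce
const RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
const RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;

// Hypothetical helper mirroring the inline check added later in this diff:
function checkReservedValue(parameterName, parameterValue) {
    if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
        throw new Error(`Parameter {${parameterName}} has missing value`);
    } else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
        throw new Error(`Parameter {${parameterName}} is restricted to use`);
    }
}

try {
    checkReservedValue('content', RESERVED_PARAMETER_RESTRICTED);
} catch (error) {
    console.error(error.message); // -> Parameter {content} is restricted to use
}
```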
@@ -746,7 +751,7 @@
     });
 }

-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-24",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-24",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-24",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-24",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-24",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-24",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-24",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-24",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

 /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object

@@ -1180,19 +1185,58 @@
     return ReferenceError;
 }(Error));

+/**
+ * Parses the template and returns the list of all parameter names
+ *
+ * @param template the template with parameters in {curly} braces
+ * @returns the list of parameter names
+ */
+function extractParameters(template) {
+    var e_1, _a;
+    var matches = template.matchAll(/{\w+}/g);
+    var parameterNames = new Set();
+    try {
+        for (var matches_1 = __values(matches), matches_1_1 = matches_1.next(); !matches_1_1.done; matches_1_1 = matches_1.next()) {
+            var match = matches_1_1.value;
+            var parameterName = match[0].slice(1, -1);
+            parameterNames.add(parameterName);
+        }
+    }
+    catch (e_1_1) { e_1 = { error: e_1_1 }; }
+    finally {
+        try {
+            if (matches_1_1 && !matches_1_1.done && (_a = matches_1.return)) _a.call(matches_1);
+        }
+        finally { if (e_1) throw e_1.error; }
+    }
+    return parameterNames;
+}
+/**
+ * TODO: !!!!! Rename to extractParameterNames
+ */
+
 /**
  * Unprepare just strips the preparation data of the pipeline
  */
 function unpreparePipeline(pipeline) {
-    var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources;
+    var personas = pipeline.personas, knowledgeSources = pipeline.knowledgeSources, promptTemplates = pipeline.promptTemplates;
     personas = personas.map(function (persona) { return (__assign(__assign({}, persona), { modelRequirements: undefined, preparationIds: undefined })); });
     knowledgeSources = knowledgeSources.map(function (knowledgeSource) { return (__assign(__assign({}, knowledgeSource), { preparationIds: undefined })); });
-
+    promptTemplates = promptTemplates.map(function (promptTemplate) {
+        var dependentParameterNames = promptTemplate.dependentParameterNames;
+        var parameterNames = extractParameters(promptTemplate.preparedContent || '');
+        dependentParameterNames = dependentParameterNames.filter(function (dependentParameterName) { return !parameterNames.has(dependentParameterName); });
+        var promptTemplateUnprepared = __assign(__assign({}, promptTemplate), { dependentParameterNames: dependentParameterNames });
+        delete promptTemplateUnprepared.preparedContent;
+        return promptTemplateUnprepared;
+    });
+    return __assign(__assign({}, pipeline), { promptTemplates: promptTemplates, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
 }
 /**
  * TODO: [🔼] !!! Export via `@promptbook/core`
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
  * TODO: Write tests for `preparePipeline`
+ * TODO: [🍙] Make some standart order of json properties
  */

 /**
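The newly added `extractParameters` helper appears above in down-transpiled ES5; it boils down to a single regex scan. A modern-JS equivalent with a usage example:

```js
// Modern-JS equivalent of the extractParameters helper added in the hunk above.
function extractParameters(template) {
    const parameterNames = new Set();
    for (const match of template.matchAll(/{\w+}/g)) {
        parameterNames.add(match[0].slice(1, -1)); // strip the {curly} braces
    }
    return parameterNames;
}

console.log(extractParameters('Summarize {knowledgeContent} into {title}'));
// -> Set(2) { 'knowledgeContent', 'title' }
```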
@@ -1682,33 +1726,6 @@
  * TODO: [🧠] Can this return type be better typed than void
  */

-/**
- * Parses the template and returns the list of all parameter names
- *
- * @param template the template with parameters in {curly} braces
- * @returns the list of parameter names
- */
-function extractParameters(template) {
-    var e_1, _a;
-    var matches = template.matchAll(/{\w+}/g);
-    var parameterNames = new Set();
-    try {
-        for (var matches_1 = __values(matches), matches_1_1 = matches_1.next(); !matches_1_1.done; matches_1_1 = matches_1.next()) {
-            var match = matches_1_1.value;
-            var parameterName = match[0].slice(1, -1);
-            parameterNames.add(parameterName);
-        }
-    }
-    catch (e_1_1) { e_1 = { error: e_1_1 }; }
-    finally {
-        try {
-            if (matches_1_1 && !matches_1_1.done && (_a = matches_1.return)) _a.call(matches_1);
-        }
-        finally { if (e_1) throw e_1.error; }
-    }
-    return parameterNames;
-}
-
 /**
  * Parses the given script and returns the list of all used variables that are not defined in the script
  *

@@ -1766,10 +1783,10 @@
  */
 function extractParametersFromPromptTemplate(promptTemplate) {
     var e_1, _a, e_2, _b, e_3, _c;
-    var title = promptTemplate.title, description = promptTemplate.description, blockType = promptTemplate.blockType, content = promptTemplate.content, jokerParameterNames = promptTemplate.jokerParameterNames;
+    var title = promptTemplate.title, description = promptTemplate.description, blockType = promptTemplate.blockType, content = promptTemplate.content, preparedContent = promptTemplate.preparedContent, jokerParameterNames = promptTemplate.jokerParameterNames;
     var parameterNames = new Set();
     try {
-        for (var _d = __values(__spreadArray(__spreadArray(__spreadArray([], __read(extractParameters(title)), false), __read(extractParameters(description || '')), false), __read(extractParameters(content)), false)), _e = _d.next(); !_e.done; _e = _d.next()) {
+        for (var _d = __values(__spreadArray(__spreadArray(__spreadArray(__spreadArray([], __read(extractParameters(title)), false), __read(extractParameters(description || '')), false), __read(extractParameters(content)), false), __read(extractParameters(preparedContent || '')), false)), _e = _d.next(); !_e.done; _e = _d.next()) {
             var parameterName = _e.value;
             parameterNames.add(parameterName);
         }

@@ -1809,6 +1826,8 @@
         }
         finally { if (e_3) throw e_3.error; }
     }
+    parameterNames.delete('content');
+    // <- Note {websiteContent} is used in `preparedContent`
     return parameterNames;
 }
 /**

@@ -1978,7 +1997,14 @@
         throw errors[0];
     }
     else if (errors.length > 1) {
-        throw new PipelineExecutionError(
+        throw new PipelineExecutionError(
+        // TODO: Tell which execution tools failed like
+        // 1) OpenAI throw PipelineExecutionError: Parameter {knowledge} is not defined
+        // 2) AnthropicClaude throw PipelineExecutionError: Parameter {knowledge} is not defined
+        // 3) ...
+        spaceTrim__default["default"](function (block) { return "\n All execution tools failed:\n\n ".concat(block(errors
+            .map(function (error, i) { return "".concat(i + 1, ") **").concat(error.name || 'Error', ":** ").concat(error.message); })
+            .join('\n')), "\n\n "); }));
     }
     else {
         throw new PipelineExecutionError(spaceTrim__default["default"](function (block) { return "\n You have not provided any `LlmExecutionTools` that support model variant \"".concat(prompt.modelRequirements.modelVariant, "\n\n Available `LlmExecutionTools`:\n ").concat(block(_this.llmExecutionTools
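When several `LlmExecutionTools` fail, the branch above now throws one aggregated error instead of a bare `PipelineExecutionError`. A sketch of the message it builds (the real code routes the string through `spaceTrim`; a plain join is used here):

```js
// Sketch of the aggregated failure message, reconstructed from the hunk above.
const errors = [
    new Error('Parameter {knowledge} is not defined'),
    new Error('Parameter {knowledge} is not defined'),
];
const message = [
    'All execution tools failed:',
    '',
    ...errors.map((error, i) => `${i + 1}) **${error.name || 'Error'}:** ${error.message}`),
].join('\n');
console.log(message);
// All execution tools failed:
//
// 1) **Error:** Parameter {knowledge} is not defined
// 2) **Error:** Parameter {knowledge} is not defined
```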
@@ -2099,6 +2125,12 @@
     if (!pipeline.knowledgeSources.every(function (knowledgeSource) { return knowledgeSource.preparationIds !== undefined; })) {
         return false;
     }
+    /*
+    TODO: [🧠][🍫] `promptTemplates` can not be determined if they are fully prepared SO ignoring them
+    > if (!pipeline.promptTemplates.every(({ preparedContent }) => preparedContent === undefined)) {
+    >     return false;
+    > }
+    */
     return true;
 }
 /**

@@ -2177,6 +2209,10 @@
             if (parameterValue === RESERVED_PARAMETER_MISSING_VALUE) {
                 throw new UnexpectedError("Parameter {".concat(parameterName, "} has missing value"));
             }
+            else if (parameterValue === RESERVED_PARAMETER_RESTRICTED) {
+                // TODO: [🍵]
+                throw new UnexpectedError("Parameter {".concat(parameterName, "} is restricted to use"));
+            }
         }
     }
     catch (e_1_1) { e_1 = { error: e_1_1 }; }

@@ -2435,16 +2471,17 @@
         pipeline = rawPipeline;
     }
     else {
-
+        // TODO: !!!! This should be maybe warning in report
+        console.warn(spaceTrim.spaceTrim("\n Pipeline ".concat(rawPipeline.pipelineUrl || rawPipeline.sourceFile || rawPipeline.title, " is not prepared\n\n ").concat(rawPipeline.sourceFile, "\n\n It will be prepared ad-hoc before the first execution\n But it is recommended to prepare the pipeline during collection preparation\n\n @see more at https://ptbk.io/prepare-pipeline\n ")));
     }
     var pipelineExecutor = function (inputParameters, onProgress) { return __awaiter(_this, void 0, void 0, function () {
-        // TODO:
+        // TODO: !!! Extract to separate functions and files - ALL FUNCTIONS BELOW
         function getContextForTemplate(// <- TODO: [🧠][🥜]
         template) {
             return __awaiter(this, void 0, void 0, function () {
                 return __generator(this, function (_a) {
                     TODO_USE(template);
-                    return [2 /*return*/,
+                    return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
                 });
             });
         }

@@ -2467,7 +2504,7 @@
             return __generator(this, function (_a) {
                 // TODO: !!!! Implement Better - use real index and keyword search
                 TODO_USE(template);
-                return [2 /*return*/,
+                return [2 /*return*/, RESERVED_PARAMETER_MISSING_VALUE /* <- TODO: !!!! Implement */];
             });
         });
     }

@@ -2489,6 +2526,7 @@
                     currentDate = new Date().toISOString();
                     modelName = RESERVED_PARAMETER_MISSING_VALUE;
                     reservedParameters = {
+                        content: RESERVED_PARAMETER_RESTRICTED,
                         context: context,
                         knowledge: knowledge,
                         samples: samples,

@@ -2518,7 +2556,7 @@
     }
     function executeSingleTemplate(currentTemplate) {
         return __awaiter(this, void 0, void 0, function () {
-            var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
+            var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, preparedContent, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
             var e_7, _p, e_4, _q, e_6, _r, e_5, _s, _t;
             var _this = this;
             return __generator(this, function (_u) {

@@ -2588,6 +2626,9 @@
                         expectError = null;
                         maxAttempts = currentTemplate.blockType === 'PROMPT_DIALOG' ? Infinity : maxExecutionAttempts;
                         jokerParameterNames = currentTemplate.jokerParameterNames || [];
+                        preparedContent = (currentTemplate.preparedContent || '{content}')
+                            .split('{content}')
+                            .join(currentTemplate.content);
                         attempt = -jokerParameterNames.length;
                         _u.label = 4;
                     case 4:
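`executeSingleTemplate` now splices the template's original content into the `{content}` slot of its `preparedContent` before the prompt, script, or dialog runs; the `|| '{content}'` fallback keeps unprepared templates behaving exactly as before. A runnable sketch:

```js
// Sketch of the {content} splice added in the hunk above.
const currentTemplate = {
    content: 'Summarize this:\n\n> {knowledgeContent}',
    preparedContent: '{content}\n\n## Knowledge\n\n{knowledge}',
};
const preparedContent = (currentTemplate.preparedContent || '{content}')
    .split('{content}')
    .join(currentTemplate.content);
console.log(preparedContent);
// Summarize this:
//
// > {knowledgeContent}
//
// ## Knowledge
//
// {knowledge}
```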
@@ -2622,7 +2663,7 @@
                         }
                         return [3 /*break*/, 29];
                     case 6:
-                        resultString = replaceParameters(
+                        resultString = replaceParameters(preparedContent, parameters);
                         return [3 /*break*/, 30];
                     case 7:
                         prompt = {

@@ -2631,7 +2672,7 @@
                                 ? pipeline.pipelineUrl
                                 : 'anonymous' /* <- TODO: [🧠] How to deal with anonymous pipelines, do here some auto-url like SHA-256 based ad-hoc identifier? */, "#").concat(currentTemplate.name),
                             parameters: parameters,
-                            content:
+                            content: preparedContent,
                             modelRequirements: currentTemplate.modelRequirements,
                             expectations: __assign(__assign({}, (pipeline.personas.find(function (_a) {
                                 var name = _a.name;

@@ -2753,7 +2794,7 @@
                         _u.trys.push([19, 21, , 22]);
                         return [4 /*yield*/, scriptTools.execute(deepFreeze({
                             scriptLanguage: currentTemplate.contentLanguage,
-                            script:
+                            script: preparedContent,
                             parameters: parameters,
                         }))];
                     case 20:

@@ -2802,7 +2843,7 @@
                         return [4 /*yield*/, tools.userInterface.promptDialog(deepFreeze({
                             promptTitle: currentTemplate.title,
                             promptMessage: replaceParameters(currentTemplate.description || '', parameters),
-                            defaultValue: replaceParameters(
+                            defaultValue: replaceParameters(preparedContent, parameters),
                             // TODO: [🧠] !! Figure out how to define placeholder in .ptbk.md file
                             placeholder: undefined,
                             priority: priority,

@@ -2980,7 +3021,7 @@
                 var parameter = _c.value;
                 if (parametersToPass[parameter.name] === undefined) {
                     // [4]
-                    warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not
+                    warnings.push(new PipelineExecutionError("Parameter {".concat(parameter.name, "} should be an output parameter, but it was not generated during pipeline execution")));
                     continue;
                 }
                 outputParameters[parameter.name] = parametersToPass[parameter.name] || '';

@@ -2995,7 +3036,7 @@
         }
         return outputParameters;
     }
-    var executionReport, _a, _b, parameter,
+    var errors, warnings, executionReport, _a, _b, parameter, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
     var e_1, _e, e_2, _f;
     return __generator(this, function (_g) {
         switch (_g.label) {

@@ -3010,6 +3051,8 @@
                 pipeline = _g.sent();
                 _g.label = 2;
             case 2:
+                errors = [];
+                warnings = [];
                 executionReport = {
                     pipelineUrl: pipeline.pipelineUrl,
                     title: pipeline.title,

@@ -3028,9 +3071,9 @@
                     if (inputParameters[parameter.name] === undefined) {
                         return [2 /*return*/, deepFreezeWithSameType({
                                 isSuccessful: false,
-                                errors: [
-                                    new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter"))
-                                ],
+                                errors: __spreadArray([
+                                    new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter"))
+                                ], __read(errors), false),
                                 warnings: [],
                                 executionReport: executionReport,
                                 outputParameters: {},

@@ -3046,8 +3089,6 @@
                 }
                 finally { if (e_1) throw e_1.error; }
                 }
-                errors = [];
-                warnings = [];
                 _loop_1 = function (parameterName) {
                     var parameter = pipeline.parameters.find(function (_a) {
                         var name = _a.name;

@@ -3059,9 +3100,9 @@
                     else if (parameter.isInput === false) {
                         return { value: deepFreezeWithSameType({
                                 isSuccessful: false,
-                                errors: [
-                                    new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input"))
-                                ],
+                                errors: __spreadArray([
+                                    new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input"))
+                                ], __read(errors), false),
                                 warnings: warnings,
                                 executionReport: executionReport,
                                 outputParameters: {},

@@ -3196,6 +3237,8 @@
     return pipelineExecutor;
 }
 /**
+ * TODO: !!!! return `preparedPipeline` from execution
+ * TODO: !!!! `isNotPreparedWarningSupressed`
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters

@@ -3210,7 +3253,7 @@
 /**
  * @@@
  */
-function prepareKnowledgeFromMarkdown(
+function prepareKnowledgeFromMarkdown(knowledgeContent /* <- TODO: [🖖] (?maybe not) Always the file */, options) {
     return __awaiter(this, void 0, void 0, function () {
         var llmTools, _a, maxParallelCount, _b, isVerbose, collection, prepareKnowledgeFromMarkdownExecutor, _c, prepareTitleExecutor, _d, prepareKeywordsExecutor, _e, result, outputParameters, knowledgePiecesRaw, knowledgeTextPieces, knowledge;
         var _f, _g, _h;

@@ -3248,7 +3291,7 @@
                         llm: llmTools,
                     },
                     _h)]);
-                return [4 /*yield*/, prepareKnowledgeFromMarkdownExecutor({
+                return [4 /*yield*/, prepareKnowledgeFromMarkdownExecutor({ knowledgeContent: knowledgeContent })];
             case 4:
                 result = _j.sent();
                 assertsExecutionSuccessful(result);

@@ -3261,25 +3304,25 @@
                 return [4 /*yield*/, Promise.all(
                     // TODO: [🪂] !! Do not send all at once but in chunks
                     knowledgeTextPieces.map(function (knowledgeTextPiece, i) { return __awaiter(_this, void 0, void 0, function () {
-                        var name, title,
+                        var name, title, knowledgePieceContent, keywords, index, titleResult, _a, titleRaw, keywordsResult, _b, keywordsRaw, embeddingResult, error_1;
                         return __generator(this, function (_c) {
                             switch (_c.label) {
                                 case 0:
                                     name = "piece-".concat(i);
                                     title = spaceTrim__default["default"](knowledgeTextPiece.substring(0, 100));
-
+                                    knowledgePieceContent = spaceTrim__default["default"](knowledgeTextPiece);
                                     keywords = [];
                                     index = [];
                                     _c.label = 1;
                                 case 1:
                                     _c.trys.push([1, 7, , 8]);
-                                    return [4 /*yield*/, prepareTitleExecutor({
+                                    return [4 /*yield*/, prepareTitleExecutor({ knowledgePieceContent: knowledgePieceContent })];
                                 case 2:
                                     titleResult = _c.sent();
                                     _a = titleResult.outputParameters.title, titleRaw = _a === void 0 ? 'Untitled' : _a;
                                     title = spaceTrim__default["default"](titleRaw) /* <- TODO: Maybe do in pipeline */;
                                     name = titleToName(title);
-                                    return [4 /*yield*/, prepareKeywordsExecutor({
+                                    return [4 /*yield*/, prepareKeywordsExecutor({ knowledgePieceContent: knowledgePieceContent })];
                                 case 3:
                                     keywordsResult = _c.sent();
                                     _b = keywordsResult.outputParameters.keywords, keywordsRaw = _b === void 0 ? '' : _b;

@@ -3297,7 +3340,7 @@
                                 case 4: return [4 /*yield*/, llmTools.callEmbeddingModel({
                                         title: "Embedding for ".concat(title) /* <- Note: No impact on embedding result itself, just for logging */,
                                         parameters: {},
-                                        content:
+                                        content: knowledgePieceContent,
                                         modelRequirements: {
                                             modelVariant: 'EMBEDDING',
                                         },

@@ -3318,7 +3361,7 @@
                                 case 8: return [2 /*return*/, {
                                         name: name,
                                         title: title,
-                                        content:
+                                        content: knowledgePieceContent,
                                         keywords: keywords,
                                         index: index,
                                         // <- TODO: [☀] sources,
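With these changes, one space-trimmed `knowledgePieceContent` string feeds the title executor, the keyword executor, and the embedding call, and is stored as the piece's `content`. Roughly the shape returned for each piece (all values below are illustrative, not taken from a real run):

```js
// Illustrative shape of one prepared knowledge piece; values are made up.
const knowledgePiece = {
    name: 'promptbook-pipelines',          // titleToName(title); starts as `piece-${i}`
    title: 'Promptbook pipelines',         // from prepareTitleExecutor
    content: 'Promptbook pipelines are defined in markdown…', // spaceTrim(knowledgeTextPiece)
    keywords: ['promptbook', 'pipeline'],  // from prepareKeywordsExecutor
    index: [],                             // embedding vectors are pushed in case 4
};
```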
@@ -3468,6 +3511,57 @@
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
  */

+/**
+ * @@@
+ */
+function prepareTemplates(pipeline, options) {
+    return __awaiter(this, void 0, void 0, function () {
+        var _a, maxParallelCount, promptTemplates, parameters, knowledgePiecesCount, promptTemplatesPrepared;
+        var _this = this;
+        return __generator(this, function (_b) {
+            switch (_b.label) {
+                case 0:
+                    _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
+                    promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
+                    // TODO: !!!! Apply samples to each template (if missing and is for the template defined)
+                    TODO_USE(parameters);
+                    promptTemplatesPrepared = new Array(promptTemplates.length);
+                    return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
+                            var dependentParameterNames, preparedContent, preparedTemplate;
+                            return __generator(this, function (_a) {
+                                dependentParameterNames = template.dependentParameterNames;
+                                preparedContent = undefined;
+                                if (knowledgePiecesCount > 0 && !dependentParameterNames.includes('knowledge')) {
+                                    preparedContent = spaceTrim.spaceTrim("\n {content}\n\n ## Knowledge\n\n {knowledge}\n ");
+                                    // <- TODO: [🧠][🧻] Cutomize shape/language/formatting of the addition to the prompt
+                                    dependentParameterNames = __spreadArray(__spreadArray([], __read(dependentParameterNames), false), [
+                                        'knowledge',
+                                    ], false);
+                                }
+                                preparedTemplate = __assign(__assign({}, template), { dependentParameterNames: dependentParameterNames, preparedContent: preparedContent });
+                                promptTemplatesPrepared[index] = preparedTemplate;
+                                return [2 /*return*/];
+                            });
+                        }); })];
+                case 1:
+                    _b.sent();
+                    return [2 /*return*/, { promptTemplatesPrepared: promptTemplatesPrepared }];
+            }
+        });
+    });
+}
+/**
+ * TODO: [🧠] Add context to each template (if missing)
+ * TODO: [🧠] What is better name `prepareTemplate` or `prepareTemplateAndParameters`
+ * TODO: !!!!! Index the samples and maybe templates
+ * TODO: [🔼] !!! Export via `@promptbook/core`
+ * TODO: Write tests for `preparePipeline`
+ * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
+ * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
+ * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
+ * TODO: [🧠][🥜]
+ */
+
 /**
  * Prepare pipeline from string (markdown) format to JSON format
  *
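The heart of the new `prepareTemplates` step: when the pipeline has knowledge pieces and a template does not already depend on `{knowledge}`, the template gets a `preparedContent` wrapper and `'knowledge'` is appended to its dependencies. A condensed sketch of that per-template logic (`prepareTemplate` is a hypothetical standalone name; in the package this runs inline inside `forEachAsync`):

```js
// Condensed per-template logic from prepareTemplates above.
function prepareTemplate(template, knowledgePiecesCount) {
    let { dependentParameterNames } = template;
    let preparedContent; // stays undefined when there is no knowledge to add
    if (knowledgePiecesCount > 0 && !dependentParameterNames.includes('knowledge')) {
        preparedContent = '{content}\n\n## Knowledge\n\n{knowledge}';
        dependentParameterNames = [...dependentParameterNames, 'knowledge'];
    }
    return { ...template, dependentParameterNames, preparedContent };
}

const prepared = prepareTemplate(
    { content: 'Write a title for {knowledgePieceContent}', dependentParameterNames: ['knowledgePieceContent'] },
    3, // knowledgePiecesCount
);
console.log(prepared.dependentParameterNames); // -> [ 'knowledgePieceContent', 'knowledge' ]
```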
@@ -3476,18 +3570,18 @@
  */
 function preparePipeline(pipeline, options) {
     return __awaiter(this, void 0, void 0, function () {
-        var _a, maxParallelCount,
+        var _a, maxParallelCount, parameters, promptTemplates,
         /*
         <- TODO: [🧠][0] `promptbookVersion` */
         knowledgeSources /*
         <- TODO: [🧊] `knowledgePieces` */, personas /*
-        <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared
+        <- TODO: [🧊] `preparations` */, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
         var _this = this;
         return __generator(this, function (_b) {
             switch (_b.label) {
                 case 0:
                     _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
-                    knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
+                    parameters = pipeline.parameters, promptTemplates = pipeline.promptTemplates, knowledgeSources = pipeline.knowledgeSources, personas = pipeline.personas;
                     currentPreparation = {
                         id: 1,
                         // TODO: [🍥]> date: $currentDate(),

@@ -3520,17 +3614,20 @@
                 case 2:
                     partialknowledgePiecesPrepared = _b.sent();
                     knowledgePiecesPrepared = partialknowledgePiecesPrepared.map(function (piece) { return (__assign(__assign({}, piece), { preparationIds: [/* TODO: [🧊] -> */ currentPreparation.id] })); });
-
-
-
-
-
+                    return [4 /*yield*/, prepareTemplates({
+                            parameters: parameters,
+                            promptTemplates: promptTemplates,
+                            knowledgePiecesCount: knowledgePiecesPrepared.length,
+                        }, options)];
+                case 3:
+                    promptTemplatesPrepared = (_b.sent()).promptTemplatesPrepared;
+                    // ----- /Templates preparation -----
+                    return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
             }
         });
     });
 }
 /**
- * TODO: !!!!! Index the samples and maybe templates
  * TODO: [🔼] !!! Export via `@promptbook/core`
  * TODO: Write tests for `preparePipeline`
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch

@@ -4257,7 +4354,7 @@
     /**
      * Example usages of the PARAMETER command
      */
-    examples: ['PARAMETER {title} Title of the book', 'OUTPUT PARAMETER {
+    examples: ['PARAMETER {title} Title of the book', 'OUTPUT PARAMETER {websiteContent} Content of the book'],
     /**
      * Parses the PARAMETER command
      */

@@ -5463,6 +5560,7 @@
  * TODO: [🥞] Not optimal parsing because `splitMarkdownIntoSections` is executed twice with same string, once through `flattenMarkdown` and second directly here
  * TODO: [♈] Probbably move expectations from templates to parameters
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
+ * TODO: [🍙] Make some standart order of json properties
  */

 /**

@@ -6135,7 +6233,7 @@
  */
 AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
     return __awaiter(this, void 0, void 0, function () {
-        var content, parameters, modelRequirements, rawRequest, start, complete, rawResponse, resultContent, usage;
+        var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
         return __generator(this, function (_a) {
             switch (_a.label) {
                 case 0:

@@ -6147,6 +6245,7 @@
                     if (modelRequirements.modelVariant !== 'CHAT') {
                         throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
                     }
+                    modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
                     rawRequest = {
                         model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
                         max_tokens: modelRequirements.maxTokens || 4096,

@@ -6158,7 +6257,7 @@
                         messages: [
                             {
                                 role: 'user',
-                                content: replaceParameters(content, parameters),
+                                content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
                             },
                         ],
                         // TODO: Is here some equivalent of user identification?> user: this.options.user,
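Each provider call now resolves `modelName` once up front and merges it into the prompt parameters, so templates can reference `{modelName}` and the returned metadata has a fallback when the raw API response omits the model. A sketch (`replaceParameters` is promptbook's own helper; the stand-in below is simplified):

```js
// Sketch of the modelName merge added to the provider calls above;
// this replaceParameters stand-in is simplified, not the package's helper.
const replaceParameters = (template, parameters) =>
    template.replace(/{(\w+)}/g, (_, name) => parameters[name] ?? `{${name}}`);

const parameters = { personaDescription: 'a patient tutor' };
const modelRequirements = {}; // no explicit model requested
const modelName = modelRequirements.modelName || 'claude-3-opus-20240229'; // getDefaultChatModel().modelName

const content = 'You are running as {modelName}. Act as {personaDescription}.';
console.log(replaceParameters(content, { ...parameters, modelName }));
// -> You are running as claude-3-opus-20240229. Act as a patient tutor.
```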
@@ -6219,9 +6318,9 @@
         throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
     }

-    const
+    const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
     const modelSettings = {
-        model:
+        model: modelName,
         max_tokens: modelRequirements.maxTokens || 2000, // <- Note: 2000 is for lagacy reasons
         // <- TODO: [🌾] Make some global max cap for maxTokens
         // <- TODO: Use here `systemMessage`, `temperature` and `seed`

@@ -6229,7 +6328,7 @@

     const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
         ...modelSettings,
-        prompt: replaceParameters(content, parameters),
+        prompt: replaceParameters(content, { ...parameters, modelName }),
         user: this.options.user,
     };
     const start: string_date_iso8601 = getCurrentIsoDate();

@@ -6736,7 +6835,7 @@
  */
 OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
     return __awaiter(this, void 0, void 0, function () {
-        var content, parameters, modelRequirements, expectFormat,
+        var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
         return __generator(this, function (_a) {
             switch (_a.label) {
                 case 0:

@@ -6748,9 +6847,9 @@
                    if (modelRequirements.modelVariant !== 'CHAT') {
                         throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
                     }
-
+                    modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
                     modelSettings = {
-                        model:
+                        model: modelName,
                         max_tokens: modelRequirements.maxTokens,
                         // <- TODO: [🌾] Make some global max cap for maxTokens
                         temperature: modelRequirements.temperature,

@@ -6772,7 +6871,7 @@
                     ])), false), [
                         {
                             role: 'user',
-                            content: replaceParameters(content, parameters),
+                            content: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
                         },
                     ], false), user: this.options.user });
                     start = getCurrentIsoDate();

@@ -6801,7 +6900,7 @@
                     }
                     return [2 /*return*/, {
                             content: resultContent,
-                            modelName: rawResponse.model ||
+                            modelName: rawResponse.model || modelName,
                             timing: {
                                 start: start,
                                 complete: complete,

@@ -6819,7 +6918,7 @@
  */
 OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
     return __awaiter(this, void 0, void 0, function () {
-        var content, parameters, modelRequirements,
+        var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
         return __generator(this, function (_a) {
             switch (_a.label) {
                 case 0:

@@ -6831,16 +6930,16 @@
                     if (modelRequirements.modelVariant !== 'COMPLETION') {
                         throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
                     }
-
+                    modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
                     modelSettings = {
-                        model:
+                        model: modelName,
                         max_tokens: modelRequirements.maxTokens || 2000,
                         // <- TODO: [🌾] Make some global max cap for maxTokens
                         temperature: modelRequirements.temperature,
                         // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
                         // <- Note: [🧆]
                     };
-                    rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, parameters), user: this.options.user });
+                    rawRequest = __assign(__assign({}, modelSettings), { prompt: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })), user: this.options.user });
                     start = getCurrentIsoDate();
                     if (this.options.isVerbose) {
                         console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));

@@ -6864,7 +6963,7 @@
                     usage = computeOpenaiUsage(content, resultContent || '', rawResponse);
                     return [2 /*return*/, {
                             content: resultContent,
-                            modelName: rawResponse.model ||
+                            modelName: rawResponse.model || modelName,
                             timing: {
                                 start: start,
                                 complete: complete,

@@ -6882,7 +6981,7 @@
  */
 OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
     return __awaiter(this, void 0, void 0, function () {
-        var content, parameters, modelRequirements,
+        var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
         return __generator(this, function (_a) {
             switch (_a.label) {
                 case 0:

@@ -6894,10 +6993,10 @@
                     if (modelRequirements.modelVariant !== 'EMBEDDING') {
                         throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
                     }
-
+                    modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
                     rawRequest = {
-                        input: replaceParameters(content, parameters),
-                        model:
+                        input: replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName })),
+                        model: modelName,
                         // TODO: !!!! Test model 3 and dimensions
                     };
                     start = getCurrentIsoDate();

@@ -6919,7 +7018,7 @@
                     usage = computeOpenaiUsage(content, '', rawResponse);
                     return [2 /*return*/, {
                             content: resultContent,
-                            modelName: rawResponse.model ||
+                            modelName: rawResponse.model || modelName,
                             timing: {
                                 start: start,
                                 complete: complete,

@@ -7242,6 +7341,7 @@
  * TODO: !!!! Not Working propperly @see https://promptbook.studio/samples/mixed-knowledge.ptbk.md
  * TODO: [🧠][0] Maybe rename to `stringifyPipelineJson`, `stringifyIndexedJson`,...
  * TODO: [🧠] Maybe more elegant solution than replacing via regex
+ * TODO: [🍙] Make some standart order of json properties
  */

 /**