@promptbook/node 0.61.0-19 → 0.61.0-20
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- package/esm/index.es.js +16 -11
- package/esm/index.es.js.map +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +16 -11
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
@@ -638,7 +638,7 @@ function forEachAsync(array, options, callbackfunction) {
 });
 }
 
-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-19",parameters:[{name:"content",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledge",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"knowledge"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-19",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-19",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {content}",dependentParameterNames:["content"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-19",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-19",parameters:[{name:"content",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {content}",expectations:{words:{min:1,max:8}},dependentParameterNames:["content"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-19",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-19",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-19",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
 /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
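The rebuilt `PipelineCollection` above inlines each prepared pipeline as plain JSON. As a reading aid, here is a rough TypeScript sketch of the shape visible in this hunk; the interface name and the optional markers are illustrative, inferred only from the data shown, and may not match the library's real exported types.

```ts
// Rough shape of one entry of the inlined PipelineCollection, inferred from the hunk above.
interface PipelineJson {
    title: string;
    pipelineUrl: string;
    promptbookVersion: string; // e.g. "0.61.0-19"
    parameters: Array<{
        name: string;
        description: string;
        isInput: boolean;
        isOutput: boolean;
    }>;
    promptTemplates: Array<{
        blockType: 'PROMPT_TEMPLATE';
        name: string;
        title: string;
        modelRequirements: { modelVariant: 'CHAT'; modelName: string };
        content: string; // prompt text with {parameter} placeholders
        expectations?: { words?: { min: number; max: number } };
        expectFormat?: 'JSON';
        dependentParameterNames: ReadonlyArray<string>;
        resultingParameterName: string;
    }>;
    knowledgeSources: ReadonlyArray<unknown>;
    knowledgePieces: ReadonlyArray<unknown>;
    personas: ReadonlyArray<unknown>;
    preparations: Array<{
        id: number;
        promptbookVersion: string;
        modelUsage: { price: { value: number }; input: unknown; output: unknown };
    }>;
    sourceFile: string;
}
```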
@@ -2181,7 +2181,7 @@ function union() {
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.61.0-
+var PROMPTBOOK_VERSION = '0.61.0-19';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /**
@@ -2814,7 +2814,7 @@ function createPipelineExecutor(options) {
 });
 });
 }
-var executionReport, _a, _b, parameter, parametersToPass, resovedParameters_1,
+var executionReport, _a, _b, parameter, parametersToPass, resovedParameters_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_1, error_1, usage_1, outputParameters, errors, _c, _d, parameter, usage;
 var e_1, _e, e_2, _f;
 return __generator(this, function (_g) {
 switch (_g.label) {
@@ -2875,7 +2875,7 @@ function createPipelineExecutor(options) {
 var name = _a.name;
 return name;
 });
-
+unresovedTemplates_1 = __spreadArray([], __read(pipeline.promptTemplates), false);
 resolving_1 = [];
 loopLimit = LOOP_LIMIT;
 _loop_1 = function () {
@@ -2887,11 +2887,16 @@ function createPipelineExecutor(options) {
 // Note: Really UnexpectedError not LimitReachedError - this should be catched during validatePipeline
 throw new UnexpectedError('Loop limit reached during resolving parameters pipeline execution');
 }
-currentTemplate =
+currentTemplate = unresovedTemplates_1.find(function (template) {
 return template.dependentParameterNames.every(function (name) { return resovedParameters_1.includes(name); });
 });
 if (!(!currentTemplate && resolving_1.length === 0)) return [3 /*break*/, 1];
-throw new UnexpectedError(spaceTrim$1("\n
+throw new UnexpectedError(spaceTrim$1(function (block) { return "\n Can not resolve some parameters\n\n Note: This should be catched in `validatePipeline`\n\n\n Unresolved parameters:\n ".concat(block(unresovedTemplates_1
+.map(function (_a) {
+var resultingParameterName = _a.resultingParameterName;
+return "- {".concat(resultingParameterName, "}");
+})
+.join('\n')), "\n\n Resolved parameters:\n ").concat(block(resovedParameters_1.map(function (name) { return "- {".concat(name, "}"); }).join('\n')), "\n "); }));
 case 1:
 if (!!currentTemplate) return [3 /*break*/, 3];
 /* [5] */ return [4 /*yield*/, Promise.race(resolving_1)];
@@ -2899,7 +2904,7 @@ function createPipelineExecutor(options) {
 /* [5] */ _h.sent();
 return [3 /*break*/, 4];
 case 3:
-
+unresovedTemplates_1 = unresovedTemplates_1.filter(function (template) { return template !== currentTemplate; });
 work_1 = executeSingleTemplate(currentTemplate)
 .then(function () {
 resovedParameters_1 = __spreadArray(__spreadArray([], __read(resovedParameters_1), false), [currentTemplate.resultingParameterName], false);
@@ -2915,7 +2920,7 @@ function createPipelineExecutor(options) {
 };
 _g.label = 4;
 case 4:
-if (!(
+if (!(unresovedTemplates_1.length > 0)) return [3 /*break*/, 6];
 return [5 /*yield**/, _loop_1()];
 case 5:
 _g.sent();
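The executor hunks above add explicit bookkeeping for which prompt templates are still unresolved: `unresovedTemplates_1` starts as a copy of `pipeline.promptTemplates`, the loop picks a template whose `dependentParameterNames` are all already resolved, removes it from the unresolved list once it has been executed, and keeps looping while anything remains. A minimal sequential sketch of that strategy follows; all names are illustrative, and the real transpiled code shown above additionally enforces `LOOP_LIMIT` and races in-flight templates via `Promise.race`.

```ts
// Minimal sketch of the resolution strategy visible in the hunks above:
// repeatedly pick a template whose dependent parameters are already resolved,
// execute it, then mark its resulting parameter as resolved.
type TemplateLike = {
    resultingParameterName: string;
    dependentParameterNames: ReadonlyArray<string>;
};

async function resolveTemplates(
    templates: ReadonlyArray<TemplateLike>,
    inputParameterNames: ReadonlyArray<string>,
    executeSingleTemplate: (template: TemplateLike) => Promise<void>,
): Promise<void> {
    let unresolvedTemplates = [...templates];
    const resolvedParameters = [...inputParameterNames];

    while (unresolvedTemplates.length > 0) {
        // Same predicate as the `find` call added in the hunk above.
        const currentTemplate = unresolvedTemplates.find((template) =>
            template.dependentParameterNames.every((name) => resolvedParameters.includes(name)),
        );

        if (!currentTemplate) {
            // Mirrors the UnexpectedError branch: the remaining templates depend on
            // parameters that can never be produced, which validatePipeline should catch.
            throw new Error('Can not resolve some parameters');
        }

        unresolvedTemplates = unresolvedTemplates.filter((template) => template !== currentTemplate);
        await executeSingleTemplate(currentTemplate);
        resolvedParameters.push(currentTemplate.resultingParameterName);
    }
}
```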
@@ -5493,7 +5498,7 @@ function createCollectionFromDirectory(path, options) {
 return [3 /*break*/, 7];
 case 6:
 if (isVerbose) {
-console.info(colors.gray("
+console.info(colors.gray("Skipped file ".concat(fileName.split('\\').join('/'), " \u2013\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060 Not a pipeline")));
 }
 _e.label = 7;
 case 7:
@@ -5515,7 +5520,7 @@ function createCollectionFromDirectory(path, options) {
 // TODO: [🐽] comparePipelines(pipeline1,pipeline2): 'IDENTICAL' |'IDENTICAL_UNPREPARED' | 'IDENTICAL_INTERFACE' | 'DIFFERENT'
 !collection.has(pipeline.pipelineUrl)) {
 if (isVerbose) {
-console.info(colors.
+console.info(colors.green("Loaded pipeline ".concat(fileName.split('\\').join('/'), "\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060")));
 }
 // Note: [🦄] Pipeline with same url uniqueness will be double-checked automatically in SimplePipelineCollection
 collection.set(pipeline.pipelineUrl, pipeline);
@@ -5525,7 +5530,7 @@ function createCollectionFromDirectory(path, options) {
 if (isVerbose) {
 console.info(colors.gray("Skipped pipeline ".concat(fileName
 .split('\\')
-.join('/'), " \u2013\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060 identical pipeline in the collection")));
+.join('/'), " \u2013\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060\u2060 Already identical pipeline in the collection")));
 }
 }
 else {