@promptbook/node 0.67.7 → 0.67.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/node",
-    "version": "0.67.7",
+    "version": "0.67.8",
     "description": "Supercharge your use of large language models",
     "private": false,
     "sideEffects": false,
@@ -47,7 +47,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/node.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.67.7"
+        "@promptbook/core": "0.67.8"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js
CHANGED
@@ -35,7 +35,7 @@
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.67.
+var PROMPTBOOK_VERSION = '0.67.7';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /*! *****************************************************************************
@@ -889,7 +889,7 @@
 });
 }
 
-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.67.
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.67.7",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.67.7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.67.7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.67.7",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- Write just the JSON object, no other text should be present\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
 /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
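The hunk above only updates the promptbookVersion stamps embedded in the bundled PipelineCollection; the collection itself is plain data, an array of prepared pipeline objects each carrying title, pipelineUrl, parameters and promptTemplates. The helper below is an illustrative sketch of reading that array as ordinary JavaScript data (it is not part of the package's API; how you obtain a reference to the collection is assumed here):

// Illustrative only: treat the bundled collection as a plain array and
// print what each prepared pipeline expects and produces.
function describeCollection(pipelineCollection) {
    for (const pipeline of pipelineCollection) {
        const inputs = pipeline.parameters.filter((p) => p.isInput).map((p) => p.name);
        const outputs = pipeline.parameters.filter((p) => p.isOutput).map((p) => p.name);
        console.log(`${pipeline.title} (${pipeline.pipelineUrl})`);
        console.log(`  prepared with promptbook ${pipeline.promptbookVersion}`);
        console.log(`  inputs:  ${inputs.join(', ')}`);
        console.log(`  outputs: ${outputs.join(', ')}`);
        console.log(`  templates: ${pipeline.promptTemplates.map((t) => t.name).join(', ')}`);
    }
}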
@@ -3074,7 +3074,7 @@
 }
 function executeSingleTemplate(currentTemplate) {
 return __awaiter(this, void 0, void 0, function () {
-var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _loop_4, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, preparedContent, _loop_5, attempt, state_2;
+var name, title, priority, progress_1, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _loop_4, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, preparedContent, _loop_5, attempt, state_2, progress_2;
 var e_4, _f, _g;
 return __generator(this, function (_h) {
 switch (_h.label) {
@@ -3083,16 +3083,23 @@
 title = currentTemplate.title;
 priority = preparedPipeline.promptTemplates.length - preparedPipeline.promptTemplates.indexOf(currentTemplate);
 if (!onProgress /* <- [3] */) return [3 /*break*/, 2]; /* <- [3] */
-
-
-
-
-
-
-
-
-
-
+progress_1 = {
+name: name,
+title: title,
+isStarted: false,
+isDone: false,
+blockType: currentTemplate.blockType,
+parameterName: currentTemplate.resultingParameterName,
+parameterValue: null,
+// <- [3]
+};
+if (isReturned) {
+throw new UnexpectedError(spaceTrim.spaceTrim(function (block) { return "\n Can not call `onProgress` after pipeline execution is finished \uD83C\uDF4F\n\n ".concat(block(pipelineIdentification), "\n\n ").concat(block(JSON.stringify(progress_1, null, 4)
+.split('\n')
+.map(function (line) { return "> ".concat(line); })
+.join('\n')), "\n "); }));
+}
+return [4 /*yield*/, onProgress(progress_1)];
 case 1:
 _h.sent();
 _h.label = 2;
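The added progress_1 object is what the executor now hands to the caller's onProgress callback before a template starts: isStarted and isDone are false and parameterValue is still null. A minimal consumer-side sketch of such a callback follows; the fields mirror the progress objects built in the hunk above, while the way the executor is created and invoked is not shown in this diff and is assumed:

// Hypothetical progress handler; field names (name, title, isStarted,
// isDone, blockType, parameterName, parameterValue) come from the diff.
function onProgress(progress) {
    const phase = progress.isDone ? 'done' : progress.isStarted ? 'running' : 'queued';
    console.log(`[${phase}] ${progress.title} (${progress.blockType})`);
    if (progress.isDone) {
        // parameterValue stays null until the template finishes, then holds the result string
        console.log(`  {${progress.parameterName}} = ${progress.parameterValue}`);
    }
}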
@@ -3478,18 +3485,28 @@
 if (resultString === null) {
 throw new UnexpectedError(spaceTrim.spaceTrim(function (block) { return "\n Something went wrong and prompt result is null\n\n ".concat(block(pipelineIdentification), "\n "); }));
 }
-if (onProgress /* <- [3] */)
-
-
-
-
-
-
-
-
-
+if (!onProgress /* <- [3] */) return [3 /*break*/, 9]; /* <- [3] */
+progress_2 = {
+name: name,
+title: title,
+isStarted: true,
+isDone: true,
+blockType: currentTemplate.blockType,
+parameterName: currentTemplate.resultingParameterName,
+parameterValue: resultString,
+// <- [3]
+};
+if (isReturned) {
+throw new UnexpectedError(spaceTrim.spaceTrim(function (block) { return "\n Can not call `onProgress` after pipeline execution is finished \uD83C\uDF4E\n\n ".concat(block(pipelineIdentification), "\n\n ").concat(block(JSON.stringify(progress_2, null, 4)
+.split('\n')
+.map(function (line) { return "> ".concat(line); })
+.join('\n')), "\n\n "); }));
 }
+return [4 /*yield*/, onProgress(progress_2)];
+case 8:
+_h.sent();
+_h.label = 9;
+case 9:
 parametersToPass = Object.freeze(__assign(__assign({}, parametersToPass), (_g = {}, _g[currentTemplate.resultingParameterName] = resultString /* <- Note: Not need to detect parameter collision here because pipeline checks logic consistency during construction */, _g)));
 return [2 /*return*/];
 }
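Both new onProgress branches are guarded by an isReturned flag: the hunks further down set it to true immediately before every return of a PipelineExecutorResult, so a progress callback that fires after the executor has already produced its result raises an UnexpectedError instead of going unnoticed. The guard itself is a small, reusable pattern; the sketch below is an illustrative reimplementation of the same idea, not the library's code:

// Illustrative guard: once the work function has produced its final
// result, any further attempt to report progress is a programming error.
function runWithProgressGuard(work, onProgress) {
    let isReturned = false;

    const guardedOnProgress = (progress) => {
        if (isReturned) {
            throw new Error(
                'Can not call `onProgress` after execution is finished:\n' +
                    JSON.stringify(progress, null, 4),
            );
        }
        return onProgress(progress);
    };

    return Promise.resolve(work(guardedOnProgress)).then((result) => {
        isReturned = true; // <- mirrors the `isReturned = true;` lines added before each return
        return result;
    });
}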
@@ -3526,7 +3543,7 @@
 }
 return outputParameters;
 }
-var errors, warnings, executionReport, _a, _b, parameter, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
+var errors, warnings, executionReport, isReturned, _a, _b, parameter, _loop_1, _c, _d, parameterName, state_1, parametersToPass, resovedParameterNames_1, unresovedTemplates_1, resolving_1, loopLimit, _loop_2, error_1, usage_1, outputParameters_1, usage, outputParameters;
 var e_1, _e, e_2, _f;
 return __generator(this, function (_g) {
 switch (_g.label) {
@@ -3551,6 +3568,7 @@
 description: preparedPipeline.description,
 promptExecutions: [],
 };
+isReturned = false;
 try {
 // Note: Check that all input input parameters are defined
 for (_a = __values(preparedPipeline.parameters.filter(function (_a) {
@@ -3559,6 +3577,7 @@
 })), _b = _a.next(); !_b.done; _b = _a.next()) {
 parameter = _b.value;
 if (inputParameters[parameter.name] === undefined) {
+isReturned = true;
 return [2 /*return*/, $asDeeplyFrozenSerializableJson("Unuccessful PipelineExecutorResult (with missing parameter {".concat(parameter.name, "}) PipelineExecutorResult"), {
 isSuccessful: false,
 errors: __spreadArray([
@@ -3589,6 +3608,7 @@
 warnings.push(new PipelineExecutionError(spaceTrim.spaceTrim(function (block) { return "\n Extra parameter {".concat(parameterName, "} is being passed which is not part of the pipeline.\n\n ").concat(block(pipelineIdentification), "\n "); })));
 }
 else if (parameter.isInput === false) {
+isReturned = true;
 return { value: $asDeeplyFrozenSerializableJson(spaceTrim.spaceTrim(function (block) { return "\n Unuccessful PipelineExecutorResult (with extra parameter {".concat(parameter.name, "}) PipelineExecutorResult\n\n ").concat(block(pipelineIdentification), "\n "); }), {
 isSuccessful: false,
 errors: __spreadArray([
@@ -3701,6 +3721,7 @@
 return (result === null || result === void 0 ? void 0 : result.usage) || ZERO_USAGE;
 })), false));
 outputParameters_1 = filterJustOutputParameters();
+isReturned = true;
 return [2 /*return*/, $asDeeplyFrozenSerializableJson('Unuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult', {
 isSuccessful: false,
 errors: __spreadArray([error_1], __read(errors), false).map(serializeError),
@@ -3716,6 +3737,7 @@
 return (result === null || result === void 0 ? void 0 : result.usage) || ZERO_USAGE;
 })), false));
 outputParameters = filterJustOutputParameters();
+isReturned = true;
 return [2 /*return*/, $asDeeplyFrozenSerializableJson('Successful PipelineExecutorResult', {
 isSuccessful: true,
 errors: errors.map(serializeError),