@promptbook/node 0.61.0-27 → 0.61.0-28
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +30 -18
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/execution/PromptResult.d.ts +15 -0
- package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
- package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -2
- package/esm/typings/src/prepare/preparePipeline.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +30 -18
- package/umd/index.umd.js.map +1 -1
- package/umd/typings/src/execution/PromptResult.d.ts +15 -0
- package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -1
- package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +2 -2
- package/umd/typings/src/prepare/preparePipeline.d.ts +1 -1
package/esm/index.es.js (CHANGED)
```diff
@@ -654,7 +654,7 @@ function forEachAsync(array, options, callbackfunction) {
     });
 }
 
-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0-27",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0-27",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
 /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
```
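The inlined `PipelineCollection` is a single very long line, so for orientation here is the shape of one entry sketched as a TypeScript interface. This is inferred from the JSON literal above, not taken from the package's own typings, which may use different names and stricter types:

```ts
// Shape of one PipelineCollection entry, inferred from the JSON above.
// Hypothetical name; the package's own typings may differ.
interface PipelineCollectionEntry {
    title: string;
    pipelineUrl: string;
    promptbookVersion: string;
    parameters: Array<{
        name: string;
        description: string;
        isInput: boolean;
        isOutput: boolean;
    }>;
    promptTemplates: Array<{
        blockType: 'PROMPT_TEMPLATE';
        name: string;
        title: string;
        modelRequirements: { modelVariant: 'CHAT'; modelName: string };
        content: string; // prompt text with {parameterName} placeholders
        expectations?: { words?: { min: number; max: number } };
        expectFormat?: 'JSON';
        dependentParameterNames: string[];
        resultingParameterName: string;
    }>;
    knowledgeSources: unknown[];
    knowledgePieces: unknown[];
    personas: unknown[];
    preparations: Array<{
        id: number;
        promptbookVersion: string;
        modelUsage: unknown; // price plus input/output token, character, word, ... counts
    }>;
    sourceFile: string;
}
```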
```diff
@@ -2268,7 +2268,7 @@ function union() {
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.61.0-
+var PROMPTBOOK_VERSION = '0.61.0-27';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /**
```
```diff
@@ -3443,7 +3443,7 @@ function prepareTemplates(pipeline, options) {
                 case 0:
                     _a = options.maxParallelCount, maxParallelCount = _a === void 0 ? MAX_PARALLEL_COUNT : _a;
                     promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
-                    // TODO:
+                    // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
                     TODO_USE(parameters);
                     promptTemplatesPrepared = new Array(promptTemplates.length);
                     return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [๐ช] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
```
```diff
@@ -3552,7 +3552,7 @@ function preparePipeline(pipeline, options) {
  * TODO: Write tests for `preparePipeline`
  * TODO: [๐] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
  * TODO: [๐ง] In future one preparation can take data from previous preparation and save tokens and time
- * TODO: [๐]
+ * TODO: [๐] !!!!! Use here countTotalUsage
  * TODO: [๐ ] Actions, instruments (and maybe knowledge) => Functions and tools
  */
 
```
```diff
@@ -6058,7 +6058,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
      */
     AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
```
```diff
@@ -6071,6 +6071,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
                         throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
                     }
                     modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+                    rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
                     rawRequest = {
                         model: modelRequirements.modelName || this.getDefaultChatModel().modelName,
                         max_tokens: modelRequirements.maxTokens || 4096,
```
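This hunk (and its counterparts in `OpenAiExecutionTools` below) introduces the interpolation step: the prompt template is resolved into `rawPromptContent` before the provider request is built, with `modelName` merged into the parameter set so templates can reference it. The diff does not include `replaceParameters` itself; a minimal sketch, assuming plain `{name}` placeholder substitution as used by the pipeline templates above, could look like this:

```ts
// Minimal sketch of {name} placeholder substitution, assuming the
// {knowledgeContent}-style placeholders seen in the templates above.
// Not the package's actual replaceParameters implementation, which
// may validate, escape, or report errors differently.
function replaceParametersSketch(
    template: string,
    parameters: Record<string, string>,
): string {
    return template.replace(/{(\w+)}/g, (_match, name: string) => {
        if (parameters[name] === undefined) {
            throw new Error(`Parameter {${name}} is not defined`);
        }
        return parameters[name];
    });
}

// Mirrors how the diff merges { modelName } into the parameters:
replaceParametersSketch('Using {modelName}: summarize\n\n> {knowledgeContent}', {
    modelName: 'claude-3-opus-20240229',
    knowledgeContent: 'Markdown document content ...',
});
```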
```diff
@@ -6082,7 +6083,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
                         messages: [
                             {
                                 role: 'user',
-                                content:
+                                content: rawPromptContent,
                             },
                         ],
                         // TODO: Is here some equivalent of user identification?> user: this.options.user,
```
```diff
@@ -6119,8 +6120,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
                                 complete: complete,
                             },
                             usage: usage,
+                            rawPromptContent: rawPromptContent,
+                            rawRequest: rawRequest,
                             rawResponse: rawResponse,
-                            // <- [
+                            // <- [๐ฏ]
                         }];
                 }
             });
```
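Together with the 15 lines added to `PromptResult.d.ts` (listed above but not shown in detail), the practical effect is that every result now carries the interpolated prompt and the exact provider request alongside the raw response. A usage sketch, assuming the new fields are exposed on the result type the way they are returned here:

```ts
// Sketch: reading the new debugging fields from a chat-model result.
// The result shape below is assumed from the fields returned in this diff;
// `tools` and `prompt` are placeholders for real instances.
declare const tools: {
    callChatModel(prompt: unknown): Promise<{
        rawPromptContent: string;
        rawRequest: unknown;
        rawResponse: unknown;
    }>;
};
declare const prompt: unknown;

const result = await tools.callChatModel(prompt);
console.info('Interpolated prompt:', result.rawPromptContent);
console.info('Provider request:', JSON.stringify(result.rawRequest, null, 2));
console.info('Provider response:', JSON.stringify(result.rawResponse, null, 2));
```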
```diff
@@ -6153,7 +6156,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
 
             const rawRequest: xxxx.Completions.CompletionCreateParamsNonStreaming = {
                 ...modelSettings,
-                prompt:
+                prompt: rawPromptContent,
                 user: this.options.user,
             };
             const start: string_date_iso8601 = getCurrentIsoDate();
```
```diff
@@ -6192,7 +6195,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
                 },
                 usage,
                 rawResponse,
-                // <- [
+                // <- [๐ฏ]
             };
         }
     */
```
```diff
@@ -6660,7 +6663,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
      */
     OpenAiExecutionTools.prototype.callChatModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, expectFormat, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
```
```diff
@@ -6686,6 +6689,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
                             type: 'json_object',
                         };
                     }
+                    rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
                     rawRequest = __assign(__assign({}, modelSettings), { messages: __spreadArray(__spreadArray([], __read((modelRequirements.systemMessage === undefined
                         ? []
                         : [
```
```diff
@@ -6696,7 +6700,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
                     ])), false), [
                         {
                             role: 'user',
-                            content:
+                            content: rawPromptContent,
                         },
                     ], false), user: this.options.user });
                     start = getCurrentIsoDate();
```
```diff
@@ -6731,8 +6735,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
                             complete: complete,
                         },
                         usage: usage,
+                        rawPromptContent: rawPromptContent,
+                        rawRequest: rawRequest,
                         rawResponse: rawResponse,
-                        // <- [
+                        // <- [๐ฏ]
                     }];
                 }
             });
```
```diff
@@ -6743,7 +6749,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
      */
     OpenAiExecutionTools.prototype.callCompletionModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, modelSettings, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, modelName, modelSettings, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
```
```diff
@@ -6764,7 +6770,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         // <- TODO: [๐] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
                         // <- Note: [๐ง]
                     };
-
+                    rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
+                    rawRequest = __assign(__assign({}, modelSettings), { prompt: rawPromptContent, user: this.options.user });
                     start = getCurrentIsoDate();
                     if (this.options.isVerbose) {
                         console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
```
```diff
@@ -6794,8 +6801,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
                             complete: complete,
                         },
                         usage: usage,
+                        rawPromptContent: rawPromptContent,
+                        rawRequest: rawRequest,
                         rawResponse: rawResponse,
-                        // <- [
+                        // <- [๐ฏ]
                     }];
                 }
             });
```
```diff
@@ -6806,7 +6815,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
      */
     OpenAiExecutionTools.prototype.callEmbeddingModel = function (prompt) {
         return __awaiter(this, void 0, void 0, function () {
-            var content, parameters, modelRequirements, modelName, rawRequest, start, complete, rawResponse, resultContent, usage;
+            var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
             return __generator(this, function (_a) {
                 switch (_a.label) {
                     case 0:
```
```diff
@@ -6819,8 +6828,9 @@ var OpenAiExecutionTools = /** @class */ (function () {
                         throw new PipelineExecutionError('Use embed only for EMBEDDING variant');
                     }
                     modelName = modelRequirements.modelName || this.getDefaultEmbeddingModel().modelName;
+                    rawPromptContent = replaceParameters(content, __assign(__assign({}, parameters), { modelName: modelName }));
                     rawRequest = {
-                        input:
+                        input: rawPromptContent,
                         model: modelName,
                     };
                     start = getCurrentIsoDate();
```
```diff
@@ -6848,8 +6858,10 @@ var OpenAiExecutionTools = /** @class */ (function () {
                             complete: complete,
                         },
                         usage: usage,
+                        rawPromptContent: rawPromptContent,
+                        rawRequest: rawRequest,
                         rawResponse: rawResponse,
-                        // <- [
+                        // <- [๐ฏ]
                     }];
                 }
             });
```