@promptbook/node 0.67.1 → 0.67.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +24 -23
- package/esm/index.es.js.map +1 -1
- package/esm/typings/promptbook-collection/index.d.ts +0 -2
- package/esm/typings/src/_packages/core.index.d.ts +2 -0
- package/esm/typings/src/errors/{_ExpectError.d.ts → ExpectError.d.ts} +2 -1
- package/esm/typings/src/errors/index.d.ts +2 -0
- package/esm/typings/src/prepare/preparePipeline.d.ts +2 -0
- package/package.json +2 -2
- package/umd/index.umd.js +24 -23
- package/umd/index.umd.js.map +1 -1
package/esm/typings/promptbook-collection/index.d.ts
CHANGED

@@ -14,7 +14,6 @@ declare const _default: ({
         title: string;
         modelRequirements: {
             modelVariant: string;
-            modelName: string;
         };
         content: string;
         dependentParameterNames: string[];
@@ -41,7 +40,6 @@ declare const _default: ({
         title: string;
         modelRequirements: {
             modelVariant: string;
-            modelName: string;
         };
         content: string;
         expectations: {
package/esm/typings/src/_packages/core.index.d.ts
CHANGED

@@ -27,6 +27,7 @@ import { stringifyPipelineJson } from '../conversion/utils/stringifyPipelineJson
 import { validatePipeline } from '../conversion/validation/validatePipeline';
 import { CollectionError } from '../errors/CollectionError';
 import { EnvironmentMismatchError } from '../errors/EnvironmentMismatchError';
+import { ExpectError } from '../errors/ExpectError';
 import { ERRORS } from '../errors/index';
 import { LimitReachedError } from '../errors/LimitReachedError';
 import { NotFoundError } from '../errors/NotFoundError';
@@ -99,6 +100,7 @@ export { stringifyPipelineJson };
 export { validatePipeline };
 export { CollectionError };
 export { EnvironmentMismatchError };
+export { ExpectError };
 export { ERRORS };
 export { LimitReachedError };
 export { NotFoundError };
package/esm/typings/src/errors/{_ExpectError.d.ts → ExpectError.d.ts}
CHANGED

@@ -1,7 +1,8 @@
 /**
  * This error occurs when some expectation is not met in the execution of the pipeline
  *
- * @private error of `checkExpectations` and `createPipelineExecutor`
+ * @public exported from `@promptbook/core`
+ * Note: Do not throw this error, its reserved for `checkExpectations` and `createPipelineExecutor` and public ONLY to be serializable through remote server
  * Note: Always thrown in `checkExpectations` and catched in `createPipelineExecutor` and rethrown as `PipelineExecutionError`
  * Note: This is a kindof subtype of PipelineExecutionError
  */
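The reworked doc comment pins down how `ExpectError` travels through execution: it is thrown only by `checkExpectations`, caught inside `createPipelineExecutor`, and surfaces to callers as a `PipelineExecutionError`. A minimal sketch of that pattern, with simplified, assumed shapes (the real `Expectations` type and executor are richer than this):

```ts
// Sketch only: simplified stand-ins for the promptbook internals described above.
class ExpectError extends Error {
    public constructor(message: string) {
        super(message);
        this.name = 'ExpectError';
        Object.setPrototypeOf(this, ExpectError.prototype);
    }
}

class PipelineExecutionError extends Error {
    public constructor(message: string) {
        super(message);
        this.name = 'PipelineExecutionError';
        Object.setPrototypeOf(this, PipelineExecutionError.prototype);
    }
}

// Assumed minimal expectations shape, mirroring the `words: {min, max}` seen elsewhere in this diff
type Expectations = { words?: { min?: number; max?: number } };

function checkExpectations(expectations: Expectations, value: string): void {
    const wordCount = value.trim().split(/\s+/).length;
    if (expectations.words?.min !== undefined && wordCount < expectations.words.min) {
        throw new ExpectError(`Expected at least ${expectations.words.min} words, got ${wordCount}`);
    }
    if (expectations.words?.max !== undefined && wordCount > expectations.words.max) {
        throw new ExpectError(`Expected at most ${expectations.words.max} words, got ${wordCount}`);
    }
}

function finishTemplateResult(expectations: Expectations, result: string): string {
    try {
        checkExpectations(expectations, result);
        return result;
    } catch (error) {
        if (!(error instanceof ExpectError)) {
            throw error; // anything else is a genuine failure, not an unmet expectation
        }
        // ExpectError never escapes the executor; it is rethrown as PipelineExecutionError
        throw new PipelineExecutionError(error.message);
    }
}
```

The `Object.setPrototypeOf` calls mirror what the bundled ES5 class in the UMD output further below does, so `instanceof` checks keep working after transpilation.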
package/esm/typings/src/errors/index.d.ts
CHANGED

@@ -1,5 +1,6 @@
 import { CollectionError } from './CollectionError';
 import { EnvironmentMismatchError } from './EnvironmentMismatchError';
+import { ExpectError } from './ExpectError';
 import { LimitReachedError } from './LimitReachedError';
 import { NotFoundError } from './NotFoundError';
 import { NotYetImplementedError } from './NotYetImplementedError';
@@ -14,6 +15,7 @@ import { UnexpectedError } from './UnexpectedError';
  * @public exported from `@promptbook/core`
  */
 export declare const ERRORS: {
+    readonly ExpectError: typeof ExpectError;
     readonly CollectionError: typeof CollectionError;
     readonly EnvironmentMismatchError: typeof EnvironmentMismatchError;
     readonly LimitReachedError: typeof LimitReachedError;
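Registering `ExpectError` in the `ERRORS` map is what makes the "public ONLY to be serializable through remote server" note workable: an error reduced to plain JSON on one side can be looked up by name and reconstructed on the other. A hedged sketch of that idea (the `deserializeError` helper and the `SerializedError` shape are hypothetical, not part of the package's API):

```ts
// Hypothetical helper: illustrates how a name-keyed registry such as ERRORS
// can rehydrate an error that arrived as plain JSON from a remote server.
import { ERRORS } from '@promptbook/core';

type SerializedError = { name: string; message: string };

function deserializeError(serialized: SerializedError): Error {
    const registry = ERRORS as Record<string, new (message: string) => Error>;
    const ErrorClass = registry[serialized.name];
    if (ErrorClass === undefined) {
        // Unknown error name, fall back to a plain Error instead of failing
        return new Error(`${serialized.name}: ${serialized.message}`);
    }
    return new ErrorClass(serialized.message);
}

// Usage: an ExpectError sent over the wire becomes an ExpectError instance again
const restored = deserializeError({ name: 'ExpectError', message: 'Expected at most 8 words' });
console.log(restored instanceof ERRORS.ExpectError); // true
```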
package/esm/typings/src/prepare/preparePipeline.d.ts
CHANGED

@@ -14,4 +14,6 @@ export declare function preparePipeline(pipeline: PipelineJson, options: Prepare
  * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
  * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
  * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
+ * TODO: [🧠][♏] Maybe if expecting JSON (In Anthropic Claude and other models without non-json) and its not specified in prompt content, append the instructions
+ * @see https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/increase-consistency#specify-the-desired-output-format
  */
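The new TODO only records an idea: when a template expects JSON but targets a model with no native JSON mode, the desired output format could be appended to the prompt automatically. A rough, purely illustrative sketch of what that might look like (none of this exists in the package; the names and the model heuristic are assumptions):

```ts
// Illustrative sketch of the TODO above, not implemented in @promptbook/node.
type TemplateDraft = {
    content: string;
    expectFormat?: 'JSON';
    modelRequirements: { modelName?: string };
};

// Assumption: a crude stand-in for "models without a native JSON mode"
function lacksNativeJsonMode(modelName: string | undefined): boolean {
    return modelName !== undefined && modelName.startsWith('claude-');
}

function appendJsonInstructionIfNeeded(template: TemplateDraft): TemplateDraft {
    const expectsJson = template.expectFormat === 'JSON';
    const alreadySpecified = /json/i.test(template.content);
    if (!expectsJson || alreadySpecified || !lacksNativeJsonMode(template.modelRequirements.modelName)) {
        return template;
    }
    return {
        ...template,
        // Wording follows the spirit of the Anthropic guide linked in the TODO
        content: `${template.content}\n\nRespond only with a valid JSON object and nothing else.`,
    };
}
```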
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/node",
-    "version": "0.67.1",
+    "version": "0.67.2",
     "description": "Supercharge your use of large language models",
     "private": false,
     "sideEffects": false,
@@ -47,7 +47,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/node.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.67.1"
+        "@promptbook/core": "0.67.2"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js
CHANGED
@@ -35,7 +35,7 @@
     /**
      * The version of the Promptbook library
      */
-    var PROMPTBOOK_VERSION = '0.67.
+    var PROMPTBOOK_VERSION = '0.67.1';
     // TODO: !!!! List here all the versions and annotate + put into script

     /*! *****************************************************************************
@@ -889,7 +889,7 @@
         });
     }

-    var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.67.
+    var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.67.1",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.67.1",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.67.1",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.67.1",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n- Your output format is JSON object\n- It contains the following keys:\n - `modelName`: The name of the model to use\n - `systemMessage`: The system message to provide context to the model\n - `temperature`: The sampling temperature to use\n\n### Key `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Key `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Key `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

     /**
      * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -1916,6 +1916,25 @@
         return EnvironmentMismatchError;
     }(Error));

+    /**
+     * This error occurs when some expectation is not met in the execution of the pipeline
+     *
+     * @public exported from `@promptbook/core`
+     * Note: Do not throw this error, its reserved for `checkExpectations` and `createPipelineExecutor` and public ONLY to be serializable through remote server
+     * Note: Always thrown in `checkExpectations` and catched in `createPipelineExecutor` and rethrown as `PipelineExecutionError`
+     * Note: This is a kindof subtype of PipelineExecutionError
+     */
+    var ExpectError = /** @class */ (function (_super) {
+        __extends(ExpectError, _super);
+        function ExpectError(message) {
+            var _this = _super.call(this, message) || this;
+            _this.name = 'ExpectError';
+            Object.setPrototypeOf(_this, ExpectError.prototype);
+            return _this;
+        }
+        return ExpectError;
+    }(Error));
+
     /**
      * This error type indicates that some limit was reached
      *
@@ -1954,6 +1973,7 @@
      * @public exported from `@promptbook/core`
      */
     var ERRORS = {
+        ExpectError: ExpectError,
         CollectionError: CollectionError,
         EnvironmentMismatchError: EnvironmentMismatchError,
         LimitReachedError: LimitReachedError,
@@ -2121,24 +2141,6 @@
      * TODO: [🔣] If script require contentLanguage
      */

-    /**
-     * This error occurs when some expectation is not met in the execution of the pipeline
-     *
-     * @private error of `checkExpectations` and `createPipelineExecutor`
-     * Note: Always thrown in `checkExpectations` and catched in `createPipelineExecutor` and rethrown as `PipelineExecutionError`
-     * Note: This is a kindof subtype of PipelineExecutionError
-     */
-    var ExpectError = /** @class */ (function (_super) {
-        __extends(ExpectError, _super);
-        function ExpectError(message) {
-            var _this = _super.call(this, message) || this;
-            _this.name = 'ExpectError';
-            Object.setPrototypeOf(_this, ExpectError.prototype);
-            return _this;
-        }
-        return ExpectError;
-    }(Error));
-
     /**
      * Serializes an error into a [🚉] JSON-serializable object
      *
@@ -3253,9 +3255,6 @@
                     if (!(error_4 instanceof ExpectError)) {
                         throw error_4;
                     }
-                    if (error_4 instanceof UnexpectedError) {
-                        throw error_4;
-                    }
                     expectError = error_4;
                     return [3 /*break*/, 50];
                 case 49:
@@ -4074,6 +4073,8 @@
      * TODO: [🏏] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch
      * TODO: [🧊] In future one preparation can take data from previous preparation and save tokens and time
      * TODO: [🛠] Actions, instruments (and maybe knowledge) => Functions and tools
+     * TODO: [🧠][♏] Maybe if expecting JSON (In Anthropic Claude and other models without non-json) and its not specified in prompt content, append the instructions
+     * @see https://docs.anthropic.com/en/docs/test-and-evaluate/strengthen-guardrails/increase-consistency#specify-the-desired-output-format
      */

     /**