@promptbook/node 0.66.0-0 → 0.66.0-1
- package/esm/index.es.js +57 -19
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -0
- package/esm/typings/src/_packages/core.index.d.ts +6 -2
- package/esm/typings/src/config.d.ts +22 -0
- package/esm/typings/src/execution/LlmExecutionTools.d.ts +11 -5
- package/esm/typings/src/llm-providers/_common/config.d.ts +1 -6
- package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +18 -0
- package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +4 -0
- package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +4 -0
- package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.d.ts +5 -1
- package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +3 -0
- package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +4 -1
- package/esm/typings/src/utils/environment/getGlobalScope.d.ts +3 -0
- package/package.json +2 -2
- package/umd/index.umd.js +57 -19
- package/umd/index.umd.js.map +1 -1
package/esm/index.es.js
CHANGED
@@ -17,7 +17,7 @@ import OpenAI from 'openai';
 /**
  * The version of the Promptbook library
  */
-var PROMPTBOOK_VERSION = '0.
+var PROMPTBOOK_VERSION = '0.66.0-0';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /*! *****************************************************************************
@@ -249,6 +249,9 @@ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
  * @private within the repository
  */
 var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
+/**
+ * TODO: [🧠][🧜♂️] Maybe join remoteUrl and path into single value
+ */
 
 /**
  * Prettify the html code
@@ -693,7 +696,7 @@ function forEachAsync(array, options, callbackfunction) {
     });
 }
 
-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
 /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
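Each entry in the minified `PipelineCollection` line above shares one shape. Here is a TypeScript sketch of that shape, inferred only from the JSON visible in this diff — field optionality and exact types are assumptions, not the package's published typings:

```ts
// Shape of one serialized pipeline, inferred from the PipelineCollection JSON above.
// Optionality and exact types are assumptions for illustration.
interface PipelineJsonSketch {
    title: string;
    pipelineUrl: string;
    promptbookVersion: string; // e.g. "0.66.0-0"
    parameters: Array<{ name: string; description: string; isInput: boolean; isOutput: boolean }>;
    promptTemplates: Array<{
        blockType: 'PROMPT_TEMPLATE';
        name: string;
        title: string;
        modelRequirements: { modelVariant: 'CHAT'; modelName: string };
        content: string; // prompt text with {parameterName} placeholders
        expectations?: { words?: { min: number; max: number } };
        expectFormat?: 'JSON';
        dependentParameterNames: string[];
        resultingParameterName: string;
    }>;
    knowledgeSources: unknown[];
    knowledgePieces: unknown[];
    personas: unknown[];
    preparations: unknown[];
    sourceFile: string;
}
```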
@@ -6570,6 +6573,15 @@ var ANTHROPIC_CLAUDE_MODELS = [
  * TODO: [🎰] Some mechanism to auto-update available models
  */
 
+/**
+ * Get current date in ISO 8601 format
+ *
+ * @private internal utility
+ */
+function getCurrentIsoDate() {
+    return new Date().toISOString();
+}
+
 /**
  * Helper of usage compute
  *
@@ -6604,13 +6616,42 @@ function uncertainNumber(value) {
 }
 
 /**
- *
+ * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
  *
- * @
+ * @param promptContent The content of the prompt
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
+ * @param rawResponse The raw response from Anthropic Claude API
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
+ * @private internal utility of `AnthropicClaudeExecutionTools`
  */
-function
-
+function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
+resultContent, rawResponse) {
+    var _a, _b;
+    if (rawResponse.usage === undefined) {
+        throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
+    }
+    if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
+        throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
+    }
+    var inputTokens = rawResponse.usage.input_tokens;
+    var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
+    var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
+    var price;
+    if (modelInfo === undefined || modelInfo.pricing === undefined) {
+        price = uncertainNumber();
+    }
+    else {
+        price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
+    }
+    return {
+        price: price,
+        input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
+        output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
+    };
 }
+/**
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
+ */
 
 /**
  * Execution Tools for calling Anthropic Claude API.
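De-minified, the new helper reads roughly as follows. This is a sketch for orientation, not the package source: `ANTHROPIC_CLAUDE_MODELS`, `uncertainNumber`, `computeUsageCounts`, and `PipelineExecutionError` are the in-file names seen above, re-declared here so the sketch compiles, and the per-token `pricing: { prompt, output }` shape is inferred from the compiled arithmetic:

```ts
// In-file names from the diff above, re-declared minimally; shapes are inferred.
declare const ANTHROPIC_CLAUDE_MODELS: Array<{ modelName: string; pricing?: { prompt: number; output: number } }>;
declare function uncertainNumber(value?: number): { value: number; isUncertain?: boolean };
declare function computeUsageCounts(content: string): Record<string, { value: number; isUncertain?: boolean }>;
declare class PipelineExecutionError extends Error {}

// Sketch of computeAnthropicClaudeUsage, mirroring the compiled output above
function computeAnthropicClaudeUsageSketch(
    promptContent: string,
    resultContent: string, // pass '' for embedding or failed prompts
    rawResponse: { model: string; usage?: { input_tokens?: number; output_tokens?: number } },
) {
    if (rawResponse.usage === undefined) {
        throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
    }
    if (rawResponse.usage.input_tokens === undefined) {
        throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
    }
    const inputTokens = rawResponse.usage.input_tokens;
    const outputTokens = rawResponse.usage.output_tokens ?? 0;

    // Price stays "uncertain" unless the model and its per-token pricing are known
    const modelInfo = ANTHROPIC_CLAUDE_MODELS.find(({ modelName }) => modelName === rawResponse.model);
    const price =
        modelInfo?.pricing === undefined
            ? uncertainNumber() // unknown model -> unknown price
            : uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);

    return {
        price,
        input: { tokensCount: uncertainNumber(inputTokens), ...computeUsageCounts(promptContent) },
        output: { tokensCount: uncertainNumber(outputTokens), ...computeUsageCounts(resultContent) },
    };
}
```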
@@ -6631,9 +6672,8 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
         var anthropicOptions = __assign({}, options);
         delete anthropicOptions.isVerbose;
         delete anthropicOptions.isProxied;
-        this.client = new Anthropic(
-        //
-        anthropicOptions);
+        this.client = new Anthropic(anthropicOptions);
+        // <- TODO: !!!!!! Lazy-load client
     }
     Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
         get: function () {
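The new `// <- TODO: !!!!!! Lazy-load client` note (repeated in the Azure and OpenAI constructors below) is not implemented in this version; the SDK client is still constructed eagerly. One way the deferred construction could look — a hypothetical sketch, not the package's API:

```ts
import Anthropic from '@anthropic-ai/sdk';

// Hypothetical lazy-load pattern for the TODO above: keep the options and
// construct the SDK client only on first use.
type AnthropicOptions = ConstructorParameters<typeof Anthropic>[0];

class LazyAnthropicClient {
    private client: Anthropic | undefined;

    public constructor(private readonly options: AnthropicOptions) {}

    public getClient(): Anthropic {
        if (this.client === undefined) {
            this.client = new Anthropic(this.options); // <- created on demand, not in the constructor
        }
        return this.client;
    }
}
```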
@@ -6704,15 +6744,10 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
         if (contentBlock.type !== 'text') {
             throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
         }
-        console.log('!!!!!! rawResponse.usage', rawResponse.usage);
         resultContent = contentBlock.text;
         // eslint-disable-next-line prefer-const
         complete = getCurrentIsoDate();
-        usage = {
-            price: { value: 0, isUncertain: true } /* <- TODO: [🐞] !!!!!! Compute usage */,
-            input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
-            output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
-        };
+        usage = computeAnthropicClaudeUsage(content, '', rawResponse);
         return [2 /*return*/, {
                 content: resultContent,
                 modelName: rawResponse.model,
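With this change the debug `console.log` and the inline object literal are gone; `usage` now comes from `computeAnthropicClaudeUsage`. The returned value keeps the shape the removed code produced — roughly as below, with the extra count fields merged in by `computeUsageCounts` left as an assumption:

```ts
// Rough shape of the usage value, reconstructed from the removed inline code
// (price: { value: 0, isUncertain: true }) and the new helper; the additional
// fields contributed by computeUsageCounts are an assumption here.
type UncertainNumber = { value: number; isUncertain?: boolean };

interface UsageSketch {
    price: UncertainNumber; // now tokens x per-token pricing instead of a hardcoded 0
    input: { tokensCount: UncertainNumber /* + counts from computeUsageCounts(promptContent) */ };
    output: { tokensCount: UncertainNumber /* + counts from computeUsageCounts(resultContent) */ };
}
```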
@@ -7237,9 +7272,8 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
         this.options = options;
         this.client = new OpenAIClient(
         // <- TODO: [🧱] Implement in a functional (not new Class) way
-        "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(
-        //
-        options.apiKey));
+        "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
+        // <- TODO: !!!!!! Lazy-load client
     }
     Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
         get: function () {
@@ -7509,6 +7543,9 @@ resultContent, rawResponse) {
         output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
     };
 }
+/**
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
+ */
 
 /**
  * Execution Tools for calling OpenAI API.
@@ -7529,6 +7566,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
         delete openAiOptions.isVerbose;
         delete openAiOptions.user;
         this.client = new OpenAI(__assign({}, openAiOptions));
+        // <- TODO: !!!!!! Lazy-load client
     }
     Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
         get: function () {
@@ -7830,7 +7868,7 @@ var EXECUTION_TOOLS_CLASSES = {
     // <- Note: [🦑] Add here new LLM provider
 };
 /**
- * TODO:
+ * TODO: !!!!!!! Make global register for this
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
  */
 
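The strengthened `TODO: !!!!!!! Make global register for this`, together with the [🧠][🎌] note, points toward replacing the central `EXECUTION_TOOLS_CLASSES` map with a register that each provider package fills in itself (the new `anthropic-claude/register1.d.ts` typing in the file list above hints at that direction). A minimal sketch of such a register — hypothetical, nothing like this exists in this version:

```ts
// Hypothetical provider register: each provider package would register its own
// execution-tools constructor instead of editing one central map.
type LlmToolsConstructor = new (options: Record<string, unknown>) => unknown;

const llmToolsRegister = new Map<string, LlmToolsConstructor>();

export function registerLlmTools(name: string, constructor: LlmToolsConstructor): void {
    llmToolsRegister.set(name, constructor);
}

export function getRegisteredLlmTools(name: string): LlmToolsConstructor | undefined {
    return llmToolsRegister.get(name);
}

// Usage (hypothetical), from inside a provider package:
// registerLlmTools('anthropic-claude', AnthropicClaudeExecutionTools);
```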