@promptbook/node 0.62.0-0 → 0.62.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/esm/index.es.js +13 -6
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/promptbook-collection/index.d.ts +3 -3
  4. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +3 -0
  5. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +10 -1
  6. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +10 -1
  7. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +6 -0
  8. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +3 -2
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  11. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  12. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  13. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  14. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  15. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  16. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  17. package/package.json +2 -2
  18. package/umd/index.umd.js +35 -10
  19. package/umd/index.umd.js.map +1 -1
  20. package/umd/typings/promptbook-collection/index.d.ts +3 -3
  21. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +3 -0
  22. package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +10 -1
  23. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +10 -1
  24. package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +6 -0
  25. package/umd/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +3 -2
  26. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  27. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  28. package/umd/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  29. package/umd/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  30. package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  31. package/umd/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  32. package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  33. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
package/esm/index.es.js CHANGED
@@ -4,6 +4,7 @@ import { join } from 'path';
  import spaceTrim, { spaceTrim as spaceTrim$1 } from 'spacetrim';
  import { format } from 'prettier';
  import parserHtml from 'prettier/parser-html';
+ import * as dotenv from 'dotenv';
  import Anthropic from '@anthropic-ai/sdk';
  import OpenAI from 'openai';
 
@@ -655,7 +656,7 @@ function forEachAsync(array, options, callbackfunction) {
  });
  }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title 
for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title 
for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2272,7 +2273,7 @@ function union() {
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0';
+ var PROMPTBOOK_VERSION = '0.62.0-0';
  // TODO: !!!! List here all the versions and annotate + put into script
 
  /**
@@ -3395,7 +3396,8 @@ function countTotalUsage(llmTools) {
  listModels: function () {
  return /* not await */ llmTools.listModels();
  },
- get totalUsage() {
+ getTotalUsage: function () {
+ // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
  return totalUsage;
  },
  };
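
The hunk above replaces the `totalUsage` getter with a `getTotalUsage()` method so that the wrapper object returned by `countTotalUsage` can be proxied. A minimal consumer-side sketch of the difference (the `trackedTools` setup is illustrative and not taken from this diff):

```ts
// Given a wrapper produced by countTotalUsage(llmTools); construction elided here.
declare const trackedTools: { getTotalUsage(): unknown };

// Before this change (property getter):
//   const usage = trackedTools.totalUsage;

// After this change (plain method, which a Proxy can intercept):
const usage = trackedTools.getTotalUsage();
```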
@@ -3593,7 +3595,7 @@ function preparePipeline(pipeline, options) {
  id: 1,
  // TODO: [🍥]> date: $currentDate(),
  promptbookVersion: PROMPTBOOK_VERSION,
- modelUsage: ZERO_USAGE,
+ usage: ZERO_USAGE,
  };
  preparations = [
  // ...preparations
@@ -3642,7 +3644,7 @@ function preparePipeline(pipeline, options) {
  promptTemplatesPrepared = (_c.sent()).promptTemplatesPrepared;
  // ----- /Templates preparation -----
  // Note: Count total usage
- currentPreparation.modelUsage = llmToolsWithUsage.totalUsage;
+ currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
  return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
  }
  });
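
The two hunks above, together with the regenerated `PipelineCollection`, rename the `modelUsage` field of each preparation record to `usage` (the matching typing change is `PreparationJson.d.ts` in the file list). A rough sketch of the resulting record shape, with the usage counters left untyped since the full type is not shown in this diff:

```ts
// Sketch only; field names are taken from the data above, other fields are omitted.
type PreparationRecordSketch = {
    id: number;
    promptbookVersion: string;
    usage: unknown; // <- renamed from `modelUsage` in this release
};
```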
@@ -6025,7 +6027,7 @@ function computeUsageCounts(content) {
  * @private utility for initializating UncertainNumber
  */
  function uncertainNumber(value) {
- if (value === null || value === undefined || Number.isNaN(NaN)) {
+ if (value === null || value === undefined || Number.isNaN(value)) {
  return { value: 0, isUncertain: true };
  }
  return { value: value };
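
The corrected condition above is a genuine behavior change: `Number.isNaN(NaN)` is always `true`, so the old code returned `{ value: 0, isUncertain: true }` for every input, while the new code only does so for `null`, `undefined`, or `NaN`. Illustrative calls, with return shapes read off the function body above:

```ts
uncertainNumber(42);        // now { value: 42 }; previously { value: 0, isUncertain: true }
uncertainNumber(undefined); // { value: 0, isUncertain: true } before and after
uncertainNumber(NaN);       // { value: 0, isUncertain: true } before and after
```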
@@ -6339,6 +6341,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🍜] Auto use anonymous server in browser
  */
 
  /**
@@ -7032,6 +7035,8 @@ var OpenAiExecutionTools = /** @class */ (function () {
  *
  * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
  *
+ * @@@ .env
+ *
  * It looks for environment variables:
  * - `process.env.OPENAI_API_KEY`
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -7044,6 +7049,7 @@ function createLlmToolsFromEnv(options) {
  throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
  }
  var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+ dotenv.config(); // <- TODO: !!!!!! Double check [🟢]
  var llmTools = [];
  if (typeof process.env.OPENAI_API_KEY === 'string') {
  llmTools.push(new OpenAiExecutionTools({
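
With the added `dotenv.config()` call, `createLlmToolsFromEnv` now loads a local `.env` file before it inspects `process.env`, so the API keys no longer have to be exported in the shell. A minimal sketch, assuming a `.env` file in the working directory; the import of `createLlmToolsFromEnv` is omitted because a TODO in the next hunk notes that its public export via `@promptbook/node` is still pending:

```ts
// .env (in the directory the Node.js process is started from):
//   OPENAI_API_KEY=sk-...
//   ANTHROPIC_CLAUDE_API_KEY=sk-ant-...

// createLlmToolsFromEnv must be imported from wherever your build exposes it.
const llmTools = createLlmToolsFromEnv({ isVerbose: true });
// The keys above are picked up from .env even if they were never set in the shell environment.
```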
@@ -7068,6 +7074,7 @@ function createLlmToolsFromEnv(options) {
  }
  }
  /**
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
  * TODO: [🔼] !!! Export via `@promptbook/node`
  * TODO: @@@ write discussion about this - wizzard
  * TODO: Add Azure