@promptbook/node 0.62.0-0 → 0.62.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/esm/index.es.js +14 -7
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/promptbook-collection/index.d.ts +3 -3
  4. package/esm/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  5. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +3 -0
  6. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +10 -1
  7. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +10 -1
  8. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +6 -0
  9. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +3 -2
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  11. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  13. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  15. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  16. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  17. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  18. package/package.json +2 -2
  19. package/umd/index.umd.js +36 -11
  20. package/umd/index.umd.js.map +1 -1
  21. package/umd/typings/promptbook-collection/index.d.ts +3 -3
  22. package/umd/typings/src/execution/createPipelineExecutor.d.ts +1 -1
  23. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +3 -0
  24. package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +10 -1
  25. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +10 -1
  26. package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +6 -0
  27. package/umd/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +3 -2
  28. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  29. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  30. package/umd/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  31. package/umd/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  32. package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  33. package/umd/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  34. package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  35. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
@@ -26,7 +26,7 @@ declare const _default: ({
     preparations: {
         id: number;
         promptbookVersion: string;
-        modelUsage: {
+        usage: {
            price: {
                value: number;
            };
@@ -113,7 +113,7 @@ declare const _default: ({
     preparations: {
         id: number;
         promptbookVersion: string;
-        modelUsage: {
+        usage: {
            price: {
                value: number;
            };
@@ -195,7 +195,7 @@ declare const _default: ({
     preparations: {
         id: number;
         promptbookVersion: string;
-        modelUsage: {
+        usage: {
            price: {
                value: number;
            };
@@ -58,7 +58,7 @@ export declare function createPipelineExecutor(options: CreatePipelineExecutorOp
 export {};
 /**
  * TODO: Use isVerbose here (not only pass to `preparePipeline`)
- * TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipiline to report
+ * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
  * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
  * TODO: [♈] Probbably move expectations from templates to parameters
  * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
@@ -12,6 +12,8 @@ export type CreateLlmToolsFromEnvOptions = {
  *
  * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
  *
+ * @@@ .env
+ *
  * It looks for environment variables:
  * - `process.env.OPENAI_API_KEY`
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -20,6 +22,7 @@ export type CreateLlmToolsFromEnvOptions = {
  */
 export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
 /**
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
  * TODO: [🔼] !!! Export via `@promptbook/node`
  * TODO: @@@ write discussion about this - wizzard
  * TODO: Add Azure
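In practice, the notable change behind this hunk is that `createLlmToolsFromEnv` now loads a local `.env` file via `dotenv` before reading the provider API keys (see the `dotenv.config()` call in the UMD bundle below). A minimal usage sketch, assuming the function is reachable from the package entry point (the TODO above indicates that export is still pending) and that at least one of the listed keys is set:

```ts
// Sketch only: the import path is an assumption; the TODO above says the
// export via `@promptbook/node` is still being finalized.
import { createLlmToolsFromEnv } from '@promptbook/node';

// As of 0.62.0 the function calls dotenv.config() internally, so
// OPENAI_API_KEY / ANTHROPIC_CLAUDE_API_KEY from a local .env file are
// picked up without any extra setup.
const llmTools = createLlmToolsFromEnv({ isVerbose: true });
```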
@@ -1,10 +1,19 @@
 import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
+type GetLlmToolsForCliOptions = {
+    /**
+     * @@@
+     *
+     * @default false
+     */
+    isCacheReloaded?: boolean;
+};
 /**
  * Returns LLM tools for CLI
  *
  * @private within the repository - for CLI utils
  */
-export declare function getLlmToolsForCli(): LlmExecutionToolsWithTotalUsage;
+export declare function getLlmToolsForCli(options?: GetLlmToolsForCliOptions): LlmExecutionToolsWithTotalUsage;
+export {};
 /**
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
@@ -1,11 +1,20 @@
 import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
 import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
+type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromEnvOptions & {
+    /**
+     * @@@
+     *
+     * @default false
+     */
+    isCacheReloaded?: boolean;
+};
 /**
  * Returns LLM tools for testing purposes
  *
  * @private within the repository - JUST FOR TESTS, SCRIPTS AND PLAYGROUND
  */
-export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions): LlmExecutionToolsWithTotalUsage;
+export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: GetLlmToolsForTestingAndScriptsAndPlaygroundOptions): LlmExecutionToolsWithTotalUsage;
+export {};
 /**
  * Note: [⚪] This should never be in any released package
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
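Both repository-internal helpers, `getLlmToolsForCli` above and `getLlmToolsForTestingAndScriptsAndPlayground` here, now accept an optional `isCacheReloaded` flag that defaults to `false`. A hedged sketch of how a repository script might opt out of cached LLM responses (the relative import path and the exact effect of the flag are assumptions, since the option is only documented as `@@@`):

```ts
// Repository-internal sketch; this helper is @private and not part of the
// published API.
import { getLlmToolsForTestingAndScriptsAndPlayground } from '../src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground';

const llmTools = getLlmToolsForTestingAndScriptsAndPlayground({
    isVerbose: true,       // assumed to be inherited from CreateLlmToolsFromEnvOptions
    isCacheReloaded: true, // new in 0.62.0; presumably refreshes the cache instead of reusing it
});
```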
@@ -7,4 +7,10 @@ export type CacheLlmToolsOptions = {
      * @default MemoryStorage
      */
     storage: PromptbookStorage<CacheItem>;
+    /**
+     * @@@
+     *
+     * @default false
+     */
+    isReloaded?: boolean;
 };
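The cache wrapper options gain an `isReloaded` switch (again defaulting to `false`), which the new `isCacheReloaded` flags above presumably feed into. A minimal sketch of the options shape; the import path and the interpretation of the flag are assumptions, since only `@default MemoryStorage` and `@@@` are stated here:

```ts
import type { CacheLlmToolsOptions } from './utils/cache/CacheLlmToolsOptions';

// Any PromptbookStorage<CacheItem> implementation will do here.
declare const storage: CacheLlmToolsOptions['storage'];

const options: CacheLlmToolsOptions = {
    storage,
    isReloaded: true, // new in 0.62.0: presumably ignore cached entries and re-query (defaults to false)
};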
@@ -5,10 +5,11 @@ import type { PromptResultUsage } from '../../../../execution/PromptResultUsage'
  */
 export type LlmExecutionToolsWithTotalUsage = LlmExecutionTools & {
     /**
-     * Total cost of the execution
+     * Get total cost of the execution up to this point
      */
-    totalUsage: PromptResultUsage;
+    getTotalUsage(): PromptResultUsage;
 };
 /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ * Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
  */
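This is a breaking change for anything that read the usage directly: the `totalUsage` property becomes a `getTotalUsage()` method so the object can be proxied (note [🥫]). A hedged before/after sketch, using the relative import path from the typings above:

```ts
import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';

declare const llmTools: LlmExecutionToolsWithTotalUsage;

// Before 0.62.0 (no longer compiles):
// const usage = llmTools.totalUsage;

// From 0.62.0 on:
const usage = llmTools.getTotalUsage();
console.info(usage.price.value); // accumulated cost so far
```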
@@ -46,4 +46,5 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🍜] Auto use anonymous server in browser
  */
@@ -6,3 +6,6 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutio
  * This extends Anthropic's `ClientOptions` with are directly passed to the Anthropic client.
  */
 export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
+/**
+ * TODO: [🍜] Auto add WebGPT / Promptbook.studio anonymous server in browser
+ */
@@ -1,5 +1,6 @@
 #!/usr/bin/env ts-node
 export {};
 /**
+ * TODO: [🍜] Playground with WebGPT / Promptbook.studio anonymous server
  * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
  */
@@ -51,4 +51,5 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
 /**
  * TODO: [🍓] Allow to list compatible models with each variant
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
- */
+ * TODO: [🍜] Add anonymous option
+ */
@@ -26,3 +26,6 @@ export type RemoteServerOptions = CommonExecutionToolsOptions & {
      */
     createLlmExecutionTools(clientId: client_id): LlmExecutionTools;
 };
+/**
+ * TODO: [🍜] Add anonymous option
+ */
@@ -10,6 +10,7 @@ import type { RemoteServerOptions } from './interfaces/RemoteServerOptions';
  */
 export declare function startRemoteServer(options: RemoteServerOptions): IDestroyable;
 /**
+ * TODO: [🍜] Add anonymous option
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
  * TODO: Handle progress - support streaming
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
@@ -13,7 +13,7 @@ export type PreparationJson = {
     /**
      * Usage of the prompt execution
      */
-    readonly modelUsage: PromptResultUsage;
+    readonly usage: PromptResultUsage;
 };
 /**
  * TODO: [🍙] Make some standart order of json properties
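The `PreparationJson` rename from `modelUsage` to `usage` ripples through the prebuilt pipeline collection and `preparePipeline` (see the UMD changes below), so pipeline JSON produced before 0.62.0 still carries the old key. A hedged sketch of reading the field across both shapes; the fallback helper is illustrative only, not part of the library:

```ts
import type { PreparationJson } from './types/PipelineJson/PreparationJson';

declare const preparation: PreparationJson;

// 0.62.0 and later: the usage of the preparation is under `usage`
console.info(preparation.usage.price.value);

// JSON prepared by older versions still carries `modelUsage`; a tolerant
// reader could fall back like this:
// const usage = (preparation as any).usage ?? (preparation as any).modelUsage;
```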
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/node",
-    "version": "0.62.0-0",
+    "version": "0.62.0",
     "description": "Supercharge your use of large language models",
     "private": false,
     "sideEffects": false,
@@ -51,7 +51,7 @@
         }
     ],
     "peerDependencies": {
-        "@promptbook/core": "0.62.0-0"
+        "@promptbook/core": "0.62.0"
     },
     "main": "./umd/index.umd.js",
     "module": "./esm/index.es.js",
package/umd/index.umd.js CHANGED
@@ -1,14 +1,33 @@
 (function (global, factory) {
-    typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('fs/promises'), require('path'), require('spacetrim'), require('prettier'), require('prettier/parser-html'), require('@anthropic-ai/sdk'), require('openai')) :
-    typeof define === 'function' && define.amd ? define(['exports', 'colors', 'fs/promises', 'path', 'spacetrim', 'prettier', 'prettier/parser-html', '@anthropic-ai/sdk', 'openai'], factory) :
-    (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-node"] = {}, global.colors, global.promises, global.path, global.spaceTrim, global.prettier, global.parserHtml, global.Anthropic, global.OpenAI));
-})(this, (function (exports, colors, promises, path, spaceTrim, prettier, parserHtml, Anthropic, OpenAI) { 'use strict';
+    typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('colors'), require('fs/promises'), require('path'), require('spacetrim'), require('prettier'), require('prettier/parser-html'), require('dotenv'), require('@anthropic-ai/sdk'), require('openai')) :
+    typeof define === 'function' && define.amd ? define(['exports', 'colors', 'fs/promises', 'path', 'spacetrim', 'prettier', 'prettier/parser-html', 'dotenv', '@anthropic-ai/sdk', 'openai'], factory) :
+    (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-node"] = {}, global.colors, global.promises, global.path, global.spaceTrim, global.prettier, global.parserHtml, global.dotenv, global.Anthropic, global.OpenAI));
+})(this, (function (exports, colors, promises, path, spaceTrim, prettier, parserHtml, dotenv, Anthropic, OpenAI) { 'use strict';
 
     function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }
 
+    function _interopNamespace(e) {
+        if (e && e.__esModule) return e;
+        var n = Object.create(null);
+        if (e) {
+            Object.keys(e).forEach(function (k) {
+                if (k !== 'default') {
+                    var d = Object.getOwnPropertyDescriptor(e, k);
+                    Object.defineProperty(n, k, d.get ? d : {
+                        enumerable: true,
+                        get: function () { return e[k]; }
+                    });
+                }
+            });
+        }
+        n["default"] = e;
+        return Object.freeze(n);
+    }
+
     var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
     var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim);
     var parserHtml__default = /*#__PURE__*/_interopDefaultLegacy(parserHtml);
+    var dotenv__namespace = /*#__PURE__*/_interopNamespace(dotenv);
     var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
     var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
 
@@ -660,7 +679,7 @@
     });
 }
 
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title 
for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.62.0-1",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-1",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.62.0-1",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-1",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.62.0-1",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title 
for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-1",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.62.0-1",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-1",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
 /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2277,7 +2296,7 @@
     /**
      * The version of the Promptbook library
      */
-    var PROMPTBOOK_VERSION = '0.61.0';
+    var PROMPTBOOK_VERSION = '0.62.0-1';
     // TODO: !!!! List here all the versions and annotate + put into script
 
     /**
@@ -3170,7 +3189,7 @@
     }
     /**
      * TODO: Use isVerbose here (not only pass to `preparePipeline`)
-     * TODO: [🧠] Use here `countTotalUsage` and put preparation and prepared pipiline to report
+     * TODO: [🧠][🌳] Use here `countTotalUsage` and put preparation and prepared pipiline to report
      * TODO: [🪂] Use maxParallelCount here (not only pass to `preparePipeline`)
      * TODO: [♈] Probbably move expectations from templates to parameters
      * TODO: [🧠] When not meet expectations in PROMPT_DIALOG, make some way to tell the user
@@ -3400,7 +3419,8 @@
         listModels: function () {
             return /* not await */ llmTools.listModels();
         },
-        get totalUsage() {
+        getTotalUsage: function () {
+            // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
             return totalUsage;
         },
     };
@@ -3598,7 +3618,7 @@
         id: 1,
         // TODO: [🍥]> date: $currentDate(),
         promptbookVersion: PROMPTBOOK_VERSION,
-        modelUsage: ZERO_USAGE,
+        usage: ZERO_USAGE,
     };
     preparations = [
         // ...preparations
@@ -3647,7 +3667,7 @@
         promptTemplatesPrepared = (_c.sent()).promptTemplatesPrepared;
         // ----- /Templates preparation -----
         // Note: Count total usage
-        currentPreparation.modelUsage = llmToolsWithUsage.totalUsage;
+        currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
         return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
     }
 });
@@ -6030,7 +6050,7 @@
      * @private utility for initializating UncertainNumber
      */
     function uncertainNumber(value) {
-        if (value === null || value === undefined || Number.isNaN(NaN)) {
+        if (value === null || value === undefined || Number.isNaN(value)) {
            return { value: 0, isUncertain: true };
        }
        return { value: value };
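This one-character change is a genuine bug fix: `Number.isNaN(NaN)` is always `true`, so the old guard classified every input, including perfectly good numbers, as uncertain and collapsed it to `{ value: 0, isUncertain: true }`. Checking `Number.isNaN(value)` restores the intended behaviour, roughly as in this sketch (the type names are assumptions; the real function lives in the bundle above):

```ts
type UncertainNumber = { value: number; isUncertain?: true };

function uncertainNumber(value?: number | null): UncertainNumber {
    if (value === null || value === undefined || Number.isNaN(value)) {
        return { value: 0, isUncertain: true };
    }
    return { value };
}

uncertainNumber(42);        // { value: 42 }  (before the fix: { value: 0, isUncertain: true })
uncertainNumber(NaN);       // { value: 0, isUncertain: true }
uncertainNumber(undefined); // { value: 0, isUncertain: true }
```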
@@ -6344,6 +6364,7 @@
      * TODO: Maybe Create some common util for callChatModel and callCompletionModel
      * TODO: Maybe make custom OpenaiError
      * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+     * TODO: [🍜] Auto use anonymous server in browser
      */
 
     /**
@@ -7037,6 +7058,8 @@
      *
      * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
      *
+     * @@@ .env
+     *
      * It looks for environment variables:
      * - `process.env.OPENAI_API_KEY`
      * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -7049,6 +7072,7 @@
         throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
     }
     var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+    dotenv__namespace.config();
     var llmTools = [];
     if (typeof process.env.OPENAI_API_KEY === 'string') {
         llmTools.push(new OpenAiExecutionTools({
@@ -7073,6 +7097,7 @@
     }
 }
 /**
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
  * TODO: [🔼] !!! Export via `@promptbook/node`
  * TODO: @@@ write discussion about this - wizzard
  * TODO: Add Azure