@promptbook/cli 0.62.0-0 → 0.62.0-1

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Files changed (33)
  1. package/esm/index.es.js +69 -48
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/promptbook-collection/index.d.ts +3 -3
  4. package/esm/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +3 -0
  5. package/esm/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +10 -1
  6. package/esm/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +10 -1
  7. package/esm/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +6 -0
  8. package/esm/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +3 -2
  9. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  10. package/esm/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  11. package/esm/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  12. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  13. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  14. package/esm/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  15. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  16. package/esm/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
  17. package/package.json +2 -2
  18. package/umd/index.umd.js +91 -52
  19. package/umd/index.umd.js.map +1 -1
  20. package/umd/typings/promptbook-collection/index.d.ts +3 -3
  21. package/umd/typings/src/llm-providers/_common/createLlmToolsFromEnv.d.ts +3 -0
  22. package/umd/typings/src/llm-providers/_common/getLlmToolsForCli.d.ts +10 -1
  23. package/umd/typings/src/llm-providers/_common/getLlmToolsForTestingAndScriptsAndPlayground.d.ts +10 -1
  24. package/umd/typings/src/llm-providers/_common/utils/cache/CacheLlmToolsOptions.d.ts +6 -0
  25. package/umd/typings/src/llm-providers/_common/utils/count-total-usage/LlmExecutionToolsWithTotalUsage.d.ts +3 -2
  26. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionTools.d.ts +1 -0
  27. package/umd/typings/src/llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions.d.ts +3 -0
  28. package/umd/typings/src/llm-providers/anthropic-claude/playground/playground.d.ts +1 -0
  29. package/umd/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +1 -0
  30. package/umd/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +2 -1
  31. package/umd/typings/src/llm-providers/remote/interfaces/RemoteServerOptions.d.ts +3 -0
  32. package/umd/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  33. package/umd/typings/src/types/PipelineJson/PreparationJson.d.ts +1 -1
@@ -26,7 +26,7 @@ declare const _default: ({
  preparations: {
  id: number;
  promptbookVersion: string;
- modelUsage: {
+ usage: {
  price: {
  value: number;
  };
@@ -113,7 +113,7 @@ declare const _default: ({
  preparations: {
  id: number;
  promptbookVersion: string;
- modelUsage: {
+ usage: {
  price: {
  value: number;
  };
@@ -195,7 +195,7 @@ declare const _default: ({
  preparations: {
  id: number;
  promptbookVersion: string;
- modelUsage: {
+ usage: {
  price: {
  value: number;
  };
@@ -12,6 +12,8 @@ export type CreateLlmToolsFromEnvOptions = {
  *
  * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
  *
+ * @@@ .env
+ *
  * It looks for environment variables:
  * - `process.env.OPENAI_API_KEY`
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -20,6 +22,7 @@ export type CreateLlmToolsFromEnvOptions = {
  */
  export declare function createLlmToolsFromEnv(options?: CreateLlmToolsFromEnvOptions): LlmExecutionTools;
  /**
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
  * TODO: [🔼] !!! Export via `@promptbook/node`
  * TODO: @@@ write discussion about this - wizzard
  * TODO: Add Azure
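
For orientation, a usage sketch under stated assumptions: `createLlmToolsFromEnv` is still repository-internal (the TODO notes the `@promptbook/node` export is pending), so the import path below is hypothetical; the two environment variable names are the ones documented in this hunk, and `isVerbose` is the one option visible in the compiled output further down.

    // Hypothetical import; the function is not yet exported from a public entry point
    import { createLlmToolsFromEnv } from '@promptbook/core';

    // Expects keys such as these in process.env (e.g. from a .env file loaded
    // via dotenv, see the UMD change below):
    //   OPENAI_API_KEY=...
    //   ANTHROPIC_CLAUDE_API_KEY=...
    const llmTools = createLlmToolsFromEnv({ isVerbose: true });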
@@ -1,10 +1,19 @@
  import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
+ type GetLlmToolsForCliOptions = {
+ /**
+ * @@@
+ *
+ * @default false
+ */
+ isCacheReloaded?: boolean;
+ };
  /**
  * Returns LLM tools for CLI
  *
  * @private within the repository - for CLI utils
  */
- export declare function getLlmToolsForCli(): LlmExecutionToolsWithTotalUsage;
+ export declare function getLlmToolsForCli(options?: GetLlmToolsForCliOptions): LlmExecutionToolsWithTotalUsage;
+ export {};
  /**
  * Note: [🟡] This code should never be published outside of `@promptbook/cli`
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
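
A call-site sketch of the new option (the function is `@private within the repository`, so this is illustrative only); the CLI's new `--reload-cache` flag, shown later in the UMD diff, maps directly onto it.

    // Force fresh model calls instead of reading previously cached results
    const llmTools = getLlmToolsForCli({ isCacheReloaded: true });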
@@ -1,11 +1,20 @@
  import type { CreateLlmToolsFromEnvOptions } from './createLlmToolsFromEnv';
  import type { LlmExecutionToolsWithTotalUsage } from './utils/count-total-usage/LlmExecutionToolsWithTotalUsage';
+ type GetLlmToolsForTestingAndScriptsAndPlaygroundOptions = CreateLlmToolsFromEnvOptions & {
+ /**
+ * @@@
+ *
+ * @default false
+ */
+ isCacheReloaded?: boolean;
+ };
  /**
  * Returns LLM tools for testing purposes
  *
  * @private within the repository - JUST FOR TESTS, SCRIPTS AND PLAYGROUND
  */
- export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: CreateLlmToolsFromEnvOptions): LlmExecutionToolsWithTotalUsage;
+ export declare function getLlmToolsForTestingAndScriptsAndPlayground(options?: GetLlmToolsForTestingAndScriptsAndPlaygroundOptions): LlmExecutionToolsWithTotalUsage;
+ export {};
  /**
  * Note: [⚪] This should never be in any released package
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
@@ -7,4 +7,10 @@ export type CacheLlmToolsOptions = {
  * @default MemoryStorage
  */
  storage: PromptbookStorage<CacheItem>;
+ /**
+ * @@@
+ *
+ * @default false
+ */
+ isReloaded?: boolean;
  };
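
How `isReloaded` is consumed can be read out of the compiled `getLlmToolsForCli` further down; reassembled as a sketch (imports omitted because all of these helpers are repository-private):

    // Wrap env-configured tools with usage counting, then with a file-backed
    // cache whose reads can be bypassed; results are still written back.
    const llmTools = cacheLlmTools(countTotalUsage(createLlmToolsFromEnv()), {
        storage: new FilesStorage({ cacheFolderPath: path.join(process.cwd(), EXECUTIONS_CACHE_DIRNAME) }),
        isReloaded: true, // <- skip storage.getItem, always call the model
    });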
@@ -5,10 +5,11 @@ import type { PromptResultUsage } from '../../../../execution/PromptResultUsage'
  */
  export type LlmExecutionToolsWithTotalUsage = LlmExecutionTools & {
  /**
- * Total cost of the execution
+ * Get total cost of the execution up to this point
  */
- totalUsage: PromptResultUsage;
+ getTotalUsage(): PromptResultUsage;
  };
  /**
  * TODO: [👷‍♂️] @@@ Manual about construction of llmTools
+ * Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
  */
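
The [🥫] note is the design point of this hunk, and a small self-contained sketch (not the library's code) shows why: `cacheLlmTools` builds its wrapper via object spread, and spreading copies a getter's current value rather than the accessor itself, so a getter-based `totalUsage` would be frozen at whatever it was when the wrapper was created.

    const state = { calls: 0 };
    const tools = {
        get totalUsage() { return state.calls; }, // old shape
        getTotalUsage() { return state.calls; },  // new shape
    };
    const proxied = { ...tools }; // <- what `__assign` does in the compiled output below
    state.calls = 3;
    console.log(proxied.totalUsage);      // 0 - the getter was snapshotted at spread time
    console.log(proxied.getTotalUsage()); // 3 - the method still reads live state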
@@ -46,4 +46,5 @@ export declare class AnthropicClaudeExecutionTools implements LlmExecutionTools
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🍜] Auto use anonymous server in browser
  */
@@ -6,3 +6,6 @@ import type { CommonExecutionToolsOptions } from '../../execution/CommonExecutio
  * This extends Anthropic's `ClientOptions` with are directly passed to the Anthropic client.
  */
  export type AnthropicClaudeExecutionToolsOptions = CommonExecutionToolsOptions & ClientOptions;
+ /**
+ * TODO: [🍜] Auto add WebGPT / Promptbook.studio anonymous server in browser
+ */
@@ -1,5 +1,6 @@
  #!/usr/bin/env ts-node
  export {};
  /**
+ * TODO: [🍜] Playground with WebGPT / Promptbook.studio anonymous server
  * TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
  */
@@ -51,4 +51,5 @@ export declare class RemoteLlmExecutionTools implements LlmExecutionTools {
  /**
  * TODO: [🍓] Allow to list compatible models with each variant
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
- */
+ * TODO: [🍜] Add anonymous option
+ */
@@ -26,3 +26,6 @@ export type RemoteServerOptions = CommonExecutionToolsOptions & {
  */
  createLlmExecutionTools(clientId: client_id): LlmExecutionTools;
  };
+ /**
+ * TODO: [🍜] Add anonymous option
+ */
@@ -10,6 +10,7 @@ import type { RemoteServerOptions } from './interfaces/RemoteServerOptions';
  */
  export declare function startRemoteServer(options: RemoteServerOptions): IDestroyable;
  /**
+ * TODO: [🍜] Add anonymous option
  * TODO: [⚖] Expose the collection to be able to connect to same collection via createCollectionFromUrl
  * TODO: Handle progress - support streaming
  * TODO: [🗯] Do not hang up immediately but wait until client closes OR timeout
@@ -13,7 +13,7 @@ export type PreparationJson = {
  /**
  * Usage of the prompt execution
  */
- readonly modelUsage: PromptResultUsage;
+ readonly usage: PromptResultUsage;
  };
  /**
  * TODO: [🍙] Make some standart order of json properties
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/cli",
- "version": "0.62.0-0",
+ "version": "0.62.0-1",
  "description": "Supercharge your use of large language models",
  "private": false,
  "sideEffects": false,
@@ -54,7 +54,7 @@
  }
  ],
  "peerDependencies": {
- "@promptbook/core": "0.62.0-0"
+ "@promptbook/core": "0.62.0-1"
  },
  "main": "./umd/index.umd.js",
  "module": "./esm/index.es.js",
package/umd/index.umd.js CHANGED
@@ -1,17 +1,36 @@
  (function (global, factory) {
- typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('commander'), require('spacetrim'), require('colors'), require('waitasecond'), require('fs/promises'), require('path'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('@anthropic-ai/sdk'), require('openai'), require('glob-promise')) :
- typeof define === 'function' && define.amd ? define(['exports', 'commander', 'spacetrim', 'colors', 'waitasecond', 'fs/promises', 'path', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', '@anthropic-ai/sdk', 'openai', 'glob-promise'], factory) :
- (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.commander, global.spaceTrim, global.colors, global.waitasecond, global.promises, global.path, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.Anthropic, global.OpenAI, global.glob));
- })(this, (function (exports, commander, spaceTrim, colors, waitasecond, promises, path, prettier, parserHtml, hexEncoder, sha256, Anthropic, OpenAI, glob) { 'use strict';
+ typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('commander'), require('spacetrim'), require('colors'), require('waitasecond'), require('fs/promises'), require('path'), require('prettier'), require('prettier/parser-html'), require('crypto-js/enc-hex'), require('crypto-js/sha256'), require('dotenv'), require('@anthropic-ai/sdk'), require('openai'), require('glob-promise')) :
+ typeof define === 'function' && define.amd ? define(['exports', 'commander', 'spacetrim', 'colors', 'waitasecond', 'fs/promises', 'path', 'prettier', 'prettier/parser-html', 'crypto-js/enc-hex', 'crypto-js/sha256', 'dotenv', '@anthropic-ai/sdk', 'openai', 'glob-promise'], factory) :
+ (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global["promptbook-cli"] = {}, global.commander, global.spaceTrim, global.colors, global.waitasecond, global.promises, global.path, global.prettier, global.parserHtml, global.hexEncoder, global.sha256, global.dotenv, global.Anthropic, global.OpenAI, global.glob));
+ })(this, (function (exports, commander, spaceTrim, colors, waitasecond, promises, path, prettier, parserHtml, hexEncoder, sha256, dotenv, Anthropic, OpenAI, glob) { 'use strict';

  function _interopDefaultLegacy (e) { return e && typeof e === 'object' && 'default' in e ? e : { 'default': e }; }

+ function _interopNamespace(e) {
+ if (e && e.__esModule) return e;
+ var n = Object.create(null);
+ if (e) {
+ Object.keys(e).forEach(function (k) {
+ if (k !== 'default') {
+ var d = Object.getOwnPropertyDescriptor(e, k);
+ Object.defineProperty(n, k, d.get ? d : {
+ enumerable: true,
+ get: function () { return e[k]; }
+ });
+ }
+ });
+ }
+ n["default"] = e;
+ return Object.freeze(n);
+ }
+
  var commander__default = /*#__PURE__*/_interopDefaultLegacy(commander);
  var spaceTrim__default = /*#__PURE__*/_interopDefaultLegacy(spaceTrim);
  var colors__default = /*#__PURE__*/_interopDefaultLegacy(colors);
  var parserHtml__default = /*#__PURE__*/_interopDefaultLegacy(parserHtml);
  var hexEncoder__default = /*#__PURE__*/_interopDefaultLegacy(hexEncoder);
  var sha256__default = /*#__PURE__*/_interopDefaultLegacy(sha256);
+ var dotenv__namespace = /*#__PURE__*/_interopNamespace(dotenv);
  var Anthropic__default = /*#__PURE__*/_interopDefaultLegacy(Anthropic);
  var OpenAI__default = /*#__PURE__*/_interopDefaultLegacy(OpenAI);
  var glob__default = /*#__PURE__*/_interopDefaultLegacy(glob);
@@ -154,7 +173,7 @@
  /**
  * The version of the Promptbook library
  */
- var PROMPTBOOK_VERSION = '0.61.0';
+ var PROMPTBOOK_VERSION = '0.62.0-0';
  // TODO: !!!! List here all the versions and annotate + put into script

  /**
@@ -752,7 +771,7 @@
  });
  }

- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title 
for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.61.0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.61.0",modelUsage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title 
for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.62.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[{id:1,promptbookVersion:"0.62.0-0",usage:{price:{value:0},input:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}},output:{tokensCount:{value:0},charactersCount:{value:0},wordsCount:{value:0},sentencesCount:{value:0},linesCount:{value:0},paragraphsCount:{value:0},pagesCount:{value:0}}}}],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];

  /**
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -3486,7 +3505,8 @@
  listModels: function () {
  return /* not await */ llmTools.listModels();
  },
- get totalUsage() {
+ getTotalUsage: function () {
+ // <- Note: [🥫] Not using getter `get totalUsage` but `getTotalUsage` to allow this object to be proxied
  return totalUsage;
  },
  };
@@ -3684,7 +3704,7 @@
  id: 1,
  // TODO: [🍥]> date: $currentDate(),
  promptbookVersion: PROMPTBOOK_VERSION,
- modelUsage: ZERO_USAGE,
+ usage: ZERO_USAGE,
  };
  preparations = [
  // ...preparations
@@ -3733,7 +3753,7 @@
  promptTemplatesPrepared = (_c.sent()).promptTemplatesPrepared;
  // ----- /Templates preparation -----
  // Note: Count total usage
- currentPreparation.modelUsage = llmToolsWithUsage.totalUsage;
+ currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
  return [2 /*return*/, __assign(__assign({}, pipeline), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
  }
  });
@@ -6201,7 +6221,7 @@
  switch (_a.label) {
  case 0:
  filename = this.getFilenameForKey(key);
- fileContent = JSON.stringify(value, null, 4);
+ fileContent = stringifyPipelineJson(value);
  return [4 /*yield*/, promises.mkdir(path.dirname(filename), { recursive: true })];
  case 1:
  _a.sent(); // <- [0]
@@ -6268,7 +6288,7 @@
  * @private utility for initializating UncertainNumber
  */
  function uncertainNumber(value) {
- if (value === null || value === undefined || Number.isNaN(NaN)) {
+ if (value === null || value === undefined || Number.isNaN(value)) {
  return { value: 0, isUncertain: true };
  }
  return { value: value };
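
This hunk is a genuine bug fix, not part of the rename work: `Number.isNaN(NaN)` is unconditionally `true`, so the old guard fired for every input and `uncertainNumber` reported even perfectly known numbers as uncertain zeros. With the fix:

    uncertainNumber(123); // before: { value: 0, isUncertain: true } <- the bug
    uncertainNumber(123); // after:  { value: 123 }
    uncertainNumber(NaN); // after:  { value: 0, isUncertain: true } (NaN is now actually detected)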
@@ -6582,6 +6602,7 @@
  * TODO: Maybe Create some common util for callChatModel and callCompletionModel
  * TODO: Maybe make custom OpenaiError
  * TODO: [🧠][🈁] Maybe use `isDeterministic` from options
+ * TODO: [🍜] Auto use anonymous server in browser
  */

  /**
@@ -7275,6 +7296,8 @@
  *
  * Note: This function is not cached, every call creates new instance of `LlmExecutionTools`
  *
+ * @@@ .env
+ *
  * It looks for environment variables:
  * - `process.env.OPENAI_API_KEY`
  * - `process.env.ANTHROPIC_CLAUDE_API_KEY`
@@ -7287,6 +7310,7 @@
  throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
  }
  var _a = options.isVerbose, isVerbose = _a === void 0 ? false : _a;
+ dotenv__namespace.config(); // <- TODO: !!!!!! Double check [🟢]
  var llmTools = [];
  if (typeof process.env.OPENAI_API_KEY === 'string') {
  llmTools.push(new OpenAiExecutionTools({
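
Context on the new `dotenv__namespace.config()` call: `dotenv.config()` loads a `.env` file from the process's working directory into `process.env` and by default does not overwrite variables that are already set, so it only fills in the keys that the function checks just below. The `!!!!!! Double check [🟢]` marker is the author's own open question about this behavior.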
@@ -7311,6 +7335,7 @@
  }
  }
  /**
+ * TODO: [🍜] Use `createLlmToolsFromConfiguration`
  * TODO: [🔼] !!! Export via `@promptbook/node`
  * TODO: @@@ write discussion about this - wizzard
  * TODO: Add Azure
@@ -7394,9 +7419,9 @@
  function cacheLlmTools(llmTools, options) {
  var _this = this;
  if (options === void 0) { options = {}; }
- var _a = options.storage, storage = _a === void 0 ? new MemoryStorage() : _a;
+ var _a = options.storage, storage = _a === void 0 ? new MemoryStorage() : _a, _b = options.isReloaded, isReloaded = _b === void 0 ? false : _b;
  var proxyTools = __assign(__assign({}, llmTools), {
- // <- TODO: !!!!!! Is this working?
+ // <- Note: [🥫]
  get title() {
  // TODO: [🧠] Maybe put here some suffix
  return llmTools.title;
@@ -7409,47 +7434,54 @@
  return /* not await */ llmTools.listModels();
  } });
  var callCommonModel = function (prompt) { return __awaiter(_this, void 0, void 0, function () {
- var key, cacheItem, promptResult, _a;
- return __generator(this, function (_b) {
- switch (_b.label) {
+ var key, cacheItem, _a, promptResult, _b;
+ return __generator(this, function (_c) {
+ switch (_c.label) {
  case 0:
  key = titleToName(prompt.title.substring(0, MAX_FILENAME_LENGTH - 10) +
  '-' +
  sha256__default["default"](hexEncoder__default["default"].parse(JSON.stringify(prompt.parameters))).toString( /* hex */));
+ if (!!isReloaded) return [3 /*break*/, 2];
  return [4 /*yield*/, storage.getItem(key)];
  case 1:
- cacheItem = _b.sent();
+ _a = _c.sent();
+ return [3 /*break*/, 3];
+ case 2:
+ _a = null;
+ _c.label = 3;
+ case 3:
+ cacheItem = _a;
  if (cacheItem) {
  return [2 /*return*/, cacheItem.promptResult];
  }
- _a = prompt.modelRequirements.modelVariant;
- switch (_a) {
- case 'CHAT': return [3 /*break*/, 2];
- case 'COMPLETION': return [3 /*break*/, 4];
- case 'EMBEDDING': return [3 /*break*/, 6];
+ _b = prompt.modelRequirements.modelVariant;
+ switch (_b) {
+ case 'CHAT': return [3 /*break*/, 4];
+ case 'COMPLETION': return [3 /*break*/, 6];
+ case 'EMBEDDING': return [3 /*break*/, 8];
  }
- return [3 /*break*/, 8];
- case 2: return [4 /*yield*/, llmTools.callChatModel(prompt)];
- case 3:
- promptResult = _b.sent();
- return [3 /*break*/, 9];
- case 4: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
+ return [3 /*break*/, 10];
+ case 4: return [4 /*yield*/, llmTools.callChatModel(prompt)];
  case 5:
- promptResult = _b.sent();
- return [3 /*break*/, 9];
- case 6: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
+ promptResult = _c.sent();
+ return [3 /*break*/, 11];
+ case 6: return [4 /*yield*/, llmTools.callCompletionModel(prompt)];
  case 7:
- promptResult = _b.sent();
- return [3 /*break*/, 9];
- case 8: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
- case 9: return [4 /*yield*/, storage.setItem(key, {
+ promptResult = _c.sent();
+ return [3 /*break*/, 11];
+ case 8: return [4 /*yield*/, llmTools.callEmbeddingModel(prompt)];
+ case 9:
+ promptResult = _c.sent();
+ return [3 /*break*/, 11];
+ case 10: throw new PipelineExecutionError("Unknown model variant \"".concat(prompt.modelRequirements.modelVariant, "\""));
+ case 11: return [4 /*yield*/, storage.setItem(key, {
  date: $currentDate(),
  promptbookVersion: PROMPTBOOK_VERSION,
  prompt: prompt,
  promptResult: promptResult,
  })];
- case 10:
- _b.sent();
+ case 12:
+ _c.sent();
  return [2 /*return*/, promptResult];
  }
  });
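
The relabeled `__generator` cases above are mostly transpiler noise; de-transpiled, the substantive change reads roughly like this (a reconstruction, not the actual source):

    // Skip the cache read entirely when a reload is forced; a cache hit
    // short-circuits, a miss (or a forced reload) falls through to the model call.
    const cacheItem = !isReloaded ? await storage.getItem(key) : null;
    if (cacheItem) {
        return cacheItem.promptResult;
    }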
@@ -7492,14 +7524,16 @@
  *
  * @private within the repository - for CLI utils
  */
- function getLlmToolsForCli() {
+ function getLlmToolsForCli(options) {
  if (!isRunningInNode()) {
  throw new EnvironmentMismatchError('Function `getLlmToolsForTestingAndScriptsAndPlayground` works only in Node.js environment');
  }
+ var _a = (options !== null && options !== void 0 ? options : {}).isCacheReloaded, isCacheReloaded = _a === void 0 ? false : _a;
  return cacheLlmTools(countTotalUsage(
  // <- Note: for example here we don`t want the [🌯]
  createLlmToolsFromEnv()), {
  storage: new FilesStorage({ cacheFolderPath: path.join(process.cwd(), EXECUTIONS_CACHE_DIRNAME) }),
+ isReloaded: isCacheReloaded,
  });
  }
  /**
@@ -7514,24 +7548,26 @@
  */
  function initializeMakeCommand(program) {
  var _this = this;
- var helloCommand = program.command('make');
- helloCommand.description(spaceTrim__default["default"]("\n Makes a new pipeline collection in given folder\n "));
- helloCommand.argument('<path>', 'Path to promptbook directory');
- helloCommand.option('--project-name', "Name of the project for whom collection is", 'Project');
- helloCommand.option('-f, --format <format>', spaceTrim__default["default"]("\n Output format of builded collection \"javascript\", \"typescript\" or \"json\"\n\n Note: You can use multiple formats separated by comma\n "), 'javascript' /* <- Note: [🏳‍🌈] */);
- helloCommand.option('--no-validation', "Do not validate logic of pipelines in collection", true);
- helloCommand.option('--validation', "Types of validations separated by comma (options \"logic\",\"imports\")", 'logic,imports');
- helloCommand.option('--verbose', "Is verbose", false);
- helloCommand.option('-o, --out-file <path>', spaceTrim__default["default"]("\n Where to save the builded collection\n\n Note: If you keep it \"".concat(PIPELINE_COLLECTION_BASE_FILENAME, "\" it will be saved in the root of the promptbook directory\n If you set it to a path, it will be saved in that path\n BUT you can use only one format and set correct extension\n ")), PIPELINE_COLLECTION_BASE_FILENAME);
- helloCommand.action(function (path$1, _a) {
- var projectName = _a.projectName, format = _a.format, validation = _a.validation, verbose = _a.verbose, outFile = _a.outFile;
+ var makeCommand = program.command('make');
+ makeCommand.description(spaceTrim__default["default"]("\n Makes a new pipeline collection in given folder\n "));
+ makeCommand.argument('<path>', 'Path to promptbook directory');
+ makeCommand.option('--project-name', "Name of the project for whom collection is", 'Project');
+ makeCommand.option('-f, --format <format>', spaceTrim__default["default"]("\n Output format of builded collection \"javascript\", \"typescript\" or \"json\"\n\n Note: You can use multiple formats separated by comma\n "), 'javascript' /* <- Note: [🏳‍🌈] */);
+ makeCommand.option('--no-validation', "Do not validate logic of pipelines in collection", true);
+ makeCommand.option('--validation', "Types of validations separated by comma (options \"logic\",\"imports\")", 'logic,imports');
+ makeCommand.option('--reload-cache', "Use LLM models even if cached ", false);
+ makeCommand.option('--verbose', "Is verbose", false);
+ makeCommand.option('-o, --out-file <path>', spaceTrim__default["default"]("\n Where to save the builded collection\n\n Note: If you keep it \"".concat(PIPELINE_COLLECTION_BASE_FILENAME, "\" it will be saved in the root of the promptbook directory\n If you set it to a path, it will be saved in that path\n BUT you can use only one format and set correct extension\n ")), PIPELINE_COLLECTION_BASE_FILENAME);
+ makeCommand.action(function (path$1, _a) {
+ var projectName = _a.projectName, format = _a.format, validation = _a.validation, reloadCache = _a.reloadCache, verbose = _a.verbose, outFile = _a.outFile;
  return __awaiter(_this, void 0, void 0, function () {
- var isVerbose, formats, validations, llmTools, collection, validations_1, validations_1_1, validation_1, _b, _c, pipelineUrl, pipeline, e_1_1, e_2_1, collectionJson, collectionJsonString, saveFile;
+ var isCacheReloaded, isVerbose, formats, validations, llmTools, collection, validations_1, validations_1_1, validation_1, _b, _c, pipelineUrl, pipeline, e_1_1, e_2_1, collectionJson, collectionJsonString, saveFile;
  var e_2, _d, e_1, _e;
  var _this = this;
  return __generator(this, function (_f) {
  switch (_f.label) {
  case 0:
+ isCacheReloaded = reloadCache;
  isVerbose = verbose;
  formats = (format || '')
  .split(',')
@@ -7545,11 +7581,14 @@
  console.error(colors__default["default"].red("You can use only one format when saving to a file"));
  process.exit(1);
  }
- llmTools = getLlmToolsForCli();
+ llmTools = getLlmToolsForCli({
+ isCacheReloaded: isCacheReloaded,
+ });
  return [4 /*yield*/, createCollectionFromDirectory(path$1, {
  llmTools: llmTools,
  isVerbose: isVerbose,
  isRecursive: true,
+ // <- TODO: [🍖] isCacheReloaded
  })];
  case 1:
  collection = _f.sent();
@@ -7658,7 +7697,7 @@
  if (isVerbose) {
  // TODO: !!!!!! Test that this works
  console.info(colors__default["default"].green("Collection builded"));
- console.info(colors__default["default"].cyan(usageToHuman(llmTools.totalUsage)));
+ console.info(colors__default["default"].cyan(usageToHuman(llmTools.getTotalUsage())));
  }
  process.exit(0);
  return [2 /*return*/];
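
Put together, a cache-bypassing rebuild from the command line would look something like `promptbook make ./books --reload-cache --verbose`, where the binary name and directory are illustrative assumptions; the `make` command and both flags are taken verbatim from the hunks above.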