@promptbook/cli 0.66.0-0 → 0.66.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -5,6 +5,7 @@ import type { AnthropicClaudeExecutionToolsOptions } from '../llm-providers/anth
5
5
  import type { AnthropicClaudeExecutionToolsDirectOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
6
6
  import type { AnthropicClaudeExecutionToolsProxiedOptions } from '../llm-providers/anthropic-claude/AnthropicClaudeExecutionToolsOptions';
7
7
  import { createAnthropicClaudeExecutionTools } from '../llm-providers/anthropic-claude/createAnthropicClaudeExecutionTools';
8
+ import { _ } from '../llm-providers/anthropic-claude/register1';
8
9
  export { PROMPTBOOK_VERSION };
9
10
  export { ANTHROPIC_CLAUDE_MODELS };
10
11
  export { AnthropicClaudeExecutionTools };
@@ -12,3 +13,4 @@ export type { AnthropicClaudeExecutionToolsOptions };
12
13
  export type { AnthropicClaudeExecutionToolsDirectOptions };
13
14
  export type { AnthropicClaudeExecutionToolsProxiedOptions };
14
15
  export { createAnthropicClaudeExecutionTools };
16
+ export { _ };
@@ -15,6 +15,9 @@ import { MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL } from '../config';
15
15
  import { EXECUTIONS_CACHE_DIRNAME } from '../config';
16
16
  import { PIPELINE_COLLECTION_BASE_FILENAME } from '../config';
17
17
  import { RESERVED_PARAMETER_NAMES } from '../config';
18
+ import { DEFAULT_REMOTE_URL } from '../config';
19
+ import { DEFAULT_REMOTE_URL_PATH } from '../config';
20
+ import { BOILERPLATE_LLM_TOOLS_CONFIGURATION_ } from '../config';
18
21
  import { pipelineJsonToString } from '../conversion/pipelineJsonToString';
19
22
  import type { PipelineStringToJsonOptions } from '../conversion/pipelineStringToJson';
20
23
  import { pipelineStringToJson } from '../conversion/pipelineStringToJson';
@@ -45,7 +48,6 @@ import { CallbackInterfaceTools } from '../knowledge/dialogs/callback/CallbackIn
45
48
  import type { CallbackInterfaceToolsOptions } from '../knowledge/dialogs/callback/CallbackInterfaceToolsOptions';
46
49
  import { prepareKnowledgePieces } from '../knowledge/prepare-knowledge/_common/prepareKnowledgePieces';
47
50
  import { prepareKnowledgeFromMarkdown } from '../knowledge/prepare-knowledge/markdown/prepareKnowledgeFromMarkdown';
48
- import { LLM_CONFIGURATION_BOILERPLATES } from '../llm-providers/_common/config';
49
51
  import { createLlmToolsFromConfiguration } from '../llm-providers/_common/createLlmToolsFromConfiguration';
50
52
  import { cacheLlmTools } from '../llm-providers/_common/utils/cache/cacheLlmTools';
51
53
  import { countTotalUsage } from '../llm-providers/_common/utils/count-total-usage/countTotalUsage';
@@ -80,6 +82,9 @@ export { MAX_KNOWLEDGE_SOURCES_SCRAPING_TOTAL };
80
82
  export { EXECUTIONS_CACHE_DIRNAME };
81
83
  export { PIPELINE_COLLECTION_BASE_FILENAME };
82
84
  export { RESERVED_PARAMETER_NAMES };
85
+ export { DEFAULT_REMOTE_URL };
86
+ export { DEFAULT_REMOTE_URL_PATH };
87
+ export { BOILERPLATE_LLM_TOOLS_CONFIGURATION_ };
83
88
  export { pipelineJsonToString };
84
89
  export type { PipelineStringToJsonOptions };
85
90
  export { pipelineStringToJson };
@@ -110,7 +115,6 @@ export { CallbackInterfaceTools };
110
115
  export type { CallbackInterfaceToolsOptions };
111
116
  export { prepareKnowledgePieces };
112
117
  export { prepareKnowledgeFromMarkdown };
113
- export { LLM_CONFIGURATION_BOILERPLATES };
114
118
  export { createLlmToolsFromConfiguration };
115
119
  export { cacheLlmTools };
116
120
  export { countTotalUsage };
@@ -1,3 +1,4 @@
1
+ import type { LlmToolsConfiguration } from './llm-providers/_common/LlmToolsConfiguration';
1
2
  /**
2
3
  * Warning message for the generated sections and files
3
4
  *
@@ -107,9 +108,30 @@ export declare const RESERVED_PARAMETER_RESTRICTED: string;
107
108
  export declare const MOMENT_ARG_THRESHOLDS: {
108
109
  readonly ss: 3;
109
110
  };
111
+ /**
112
+ * @@@
113
+ *
114
+ * @public exported from `@promptbook/core`
115
+ */
116
+ export declare const DEFAULT_REMOTE_URL = "https://api.pavolhejny.com/";
117
+ /**
118
+ * @@@
119
+ *
120
+ * @public exported from `@promptbook/core`
121
+ */
122
+ export declare const DEFAULT_REMOTE_URL_PATH = "/promptbook/socket.io";
123
+ /**
124
+ * @@@
125
+ *
126
+ * @public exported from `@promptbook/core`
127
+ */
128
+ export declare const BOILERPLATE_LLM_TOOLS_CONFIGURATION_: LlmToolsConfiguration;
110
129
  /**
111
130
  * @@@
112
131
  *
113
132
  * @private within the repository
114
133
  */
115
134
  export declare const DEBUG_ALLOW_PAYED_TESTING: boolean;
135
+ /**
136
+ * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
137
+ */
@@ -28,6 +28,16 @@ export type LlmExecutionTools = {
28
28
  * @example "Use all models from OpenAI"
29
29
  */
30
30
  readonly description: string_markdown;
31
+ /**
32
+ * Check configuration
33
+ *
34
+ * @returns nothing if configuration is correct
35
+ * @throws {Error} if configuration is incorrect
36
+ */
37
+ /**
38
+ * List all available models that can be used
39
+ */
40
+ listModels(): Promisable<Array<AvailableModel>>;
31
41
  /**
32
42
  * Calls a chat model
33
43
  */
@@ -40,10 +50,6 @@ export type LlmExecutionTools = {
40
50
  * Calls an embedding model
41
51
  */
42
52
  callEmbeddingModel?(prompt: Prompt): Promise<EmbeddingPromptResult>;
43
- /**
44
- * List all available models that can be used
45
- */
46
- listModels(): Promisable<Array<AvailableModel>>;
47
53
  };
48
54
  /**
49
55
  * Represents a model that can be used for prompt execution
@@ -63,8 +69,8 @@ export type AvailableModel = {
63
69
  readonly modelVariant: ModelVariant;
64
70
  };
65
71
  /**
72
+ * TODO: Implement destroyable pattern to free resources
66
73
  * TODO: [🏳] Add `callTranslationModel`
67
- * TODO: Maybe reorder `listModels` and put it befor `callChatModel`, `callCompletionModel`, `callEmbeddingModel`
68
74
  * TODO: [🧠] Emulation of one type of model with another one - emulate chat with completion; emulate translation with chat
69
75
  * TODO: [🍓][♐] Some heuristic to pick the best model in listed models
70
76
  * TODO: [🧠] Should or should not there be a word "GPT" in both callCompletionModel and callChatModel
@@ -1,15 +1,10 @@
1
1
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
2
2
  import type { TODO_any } from '../../utils/organization/TODO_any';
3
- import type { LlmToolsConfiguration } from './LlmToolsConfiguration';
4
- /**
5
- * @public exported from `@promptbook/core`
6
- */
7
- export declare const LLM_CONFIGURATION_BOILERPLATES: LlmToolsConfiguration;
8
3
  /**
9
4
  * @private internal type for `createLlmToolsFromConfiguration`
10
5
  */
11
6
  export declare const EXECUTION_TOOLS_CLASSES: Record<`create${string}`, (options: TODO_any) => LlmExecutionTools>;
12
7
  /**
13
- * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
8
+ * TODO: !!!!!!! Make global register for this
14
9
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
15
10
  */
@@ -0,0 +1,18 @@
1
+ import type Anthropic from '@anthropic-ai/sdk';
2
+ import type { PartialDeep } from 'type-fest';
3
+ import type { PromptResultUsage } from '../../execution/PromptResultUsage';
4
+ import type { Prompt } from '../../types/Prompt';
5
+ /**
6
+ * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
7
+ *
8
+ * @param promptContent The content of the prompt
9
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
10
+ * @param rawResponse The raw response from Anthropic Claude API
11
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
12
+ * @private internal utility of `AnthropicClaudeExecutionTools`
13
+ */
14
+ export declare function computeAnthropicClaudeUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
15
+ resultContent: string, rawResponse: PartialDeep<Pick<Anthropic.Messages.Message, 'model' | 'usage'>>): PromptResultUsage;
16
+ /**
17
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
18
+ */
@@ -0,0 +1,4 @@
1
+ export {};
2
+ /**
3
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
4
+ */
@@ -0,0 +1,4 @@
1
+ /**
2
+ * @public exported from `@promptbook/anthropic-claude`
3
+ */
4
+ export declare const _: undefined;
@@ -1,4 +1,5 @@
1
1
  import type OpenAI from 'openai';
2
+ import type { PartialDeep } from 'type-fest';
2
3
  import type { PromptResultUsage } from '../../execution/PromptResultUsage';
3
4
  import type { Prompt } from '../../types/Prompt';
4
5
  /**
@@ -11,4 +12,7 @@ import type { Prompt } from '../../types/Prompt';
11
12
  * @private internal utility of `OpenAiExecutionTools`
12
13
  */
13
14
  export declare function computeOpenaiUsage(promptContent: Prompt['content'], // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
14
- resultContent: string, rawResponse: Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>): PromptResultUsage;
15
+ resultContent: string, rawResponse: PartialDeep<Pick<OpenAI.Chat.Completions.ChatCompletion | OpenAI.Completions.Completion | OpenAI.Embeddings.CreateEmbeddingResponse, 'model' | 'usage'>>): PromptResultUsage;
16
+ /**
17
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
18
+ */
@@ -1 +1,4 @@
1
1
  export {};
2
+ /**
3
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
4
+ */
@@ -25,7 +25,7 @@ export type RemoteLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
25
25
  /**
26
26
  * If set, only these models will be listed as available
27
27
  *
28
- * TODO: [🧠] !!!! Figure out better solution
28
+ * TODO: [🧠] !!!!!! Figure out better solution
29
29
  */
30
30
  readonly models?: Array<AvailableModel>;
31
31
  /**
@@ -51,3 +51,6 @@ export type RemoteLlmExecutionToolsOptions = CommonExecutionToolsOptions & {
51
51
  */
52
52
  readonly clientId: client_id;
53
53
  });
54
+ /**
55
+ * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
56
+ */
@@ -7,3 +7,6 @@ import type { really_any } from '../organization/really_any';
7
7
  * @public exported from `@promptbook/utils`
8
8
  */
9
9
  export declare function $getGlobalScope(): really_any;
10
+ /***
11
+ * TODO: !!!!! Make private and promptbook registry from this
12
+ */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/cli",
3
- "version": "0.66.0-0",
3
+ "version": "0.66.0-1",
4
4
  "description": "Supercharge your use of large language models",
5
5
  "private": false,
6
6
  "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -39,7 +39,7 @@
39
39
  /**
40
40
  * The version of the Promptbook library
41
41
  */
42
- var PROMPTBOOK_VERSION = '0.65.0';
42
+ var PROMPTBOOK_VERSION = '0.66.0-0';
43
43
  // TODO: !!!! List here all the versions and annotate + put into script
44
44
 
45
45
  /*! *****************************************************************************
@@ -322,6 +322,9 @@
322
322
  * @private within the repository
323
323
  */
324
324
  var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
325
+ /**
326
+ * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
327
+ */
325
328
 
326
329
  /**
327
330
  * Initializes `about` command for Promptbook CLI utilities
@@ -851,7 +854,7 @@
851
854
  });
852
855
  }
853
856
 
854
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": 
\"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
857
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
855
858
 
856
859
  /**
857
860
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -6825,6 +6828,15 @@
6825
6828
  * TODO: [🎰] Some mechanism to auto-update available models
6826
6829
  */
6827
6830
 
6831
+ /**
6832
+ * Get current date in ISO 8601 format
6833
+ *
6834
+ * @private internal utility
6835
+ */
6836
+ function getCurrentIsoDate() {
6837
+ return new Date().toISOString();
6838
+ }
6839
+
6828
6840
  /**
6829
6841
  * Helper of usage compute
6830
6842
  *
@@ -6859,13 +6871,42 @@
6859
6871
  }
6860
6872
 
6861
6873
  /**
6862
- * Get current date in ISO 8601 format
6874
+ * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
6863
6875
  *
6864
- * @private internal utility
6876
+ * @param promptContent The content of the prompt
6877
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
6878
+ * @param rawResponse The raw response from Anthropic Claude API
6879
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
6880
+ * @private internal utility of `AnthropicClaudeExecutionTools`
6865
6881
  */
6866
- function getCurrentIsoDate() {
6867
- return new Date().toISOString();
6882
+ function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
6883
+ resultContent, rawResponse) {
6884
+ var _a, _b;
6885
+ if (rawResponse.usage === undefined) {
6886
+ throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
6887
+ }
6888
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
6889
+ throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
6890
+ }
6891
+ var inputTokens = rawResponse.usage.input_tokens;
6892
+ var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
6893
+ var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
6894
+ var price;
6895
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
6896
+ price = uncertainNumber();
6897
+ }
6898
+ else {
6899
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
6900
+ }
6901
+ return {
6902
+ price: price,
6903
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
6904
+ output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
6905
+ };
6868
6906
  }
6907
+ /**
6908
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
6909
+ */
6869
6910
 
6870
6911
  /**
6871
6912
  * Execution Tools for calling Anthropic Claude API.
@@ -6886,9 +6927,8 @@
6886
6927
  var anthropicOptions = __assign({}, options);
6887
6928
  delete anthropicOptions.isVerbose;
6888
6929
  delete anthropicOptions.isProxied;
6889
- this.client = new Anthropic__default["default"](
6890
- // <- TODO: [🧱] Implement in a functional (not new Class) way
6891
- anthropicOptions);
6930
+ this.client = new Anthropic__default["default"](anthropicOptions);
6931
+ // <- TODO: !!!!!! Lazy-load client
6892
6932
  }
6893
6933
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
6894
6934
  get: function () {
@@ -6959,15 +6999,10 @@
6959
6999
  if (contentBlock.type !== 'text') {
6960
7000
  throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
6961
7001
  }
6962
- console.log('!!!!!! rawResponse.usage', rawResponse.usage);
6963
7002
  resultContent = contentBlock.text;
6964
7003
  // eslint-disable-next-line prefer-const
6965
7004
  complete = getCurrentIsoDate();
6966
- usage = {
6967
- price: { value: 0, isUncertain: true } /* <- TODO: [🐞] !!!!!! Compute usage */,
6968
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
6969
- output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
6970
- };
7005
+ usage = computeAnthropicClaudeUsage(content, '', rawResponse);
6971
7006
  return [2 /*return*/, {
6972
7007
  content: resultContent,
6973
7008
  modelName: rawResponse.model,
@@ -7492,9 +7527,8 @@
7492
7527
  this.options = options;
7493
7528
  this.client = new openai.OpenAIClient(
7494
7529
  // <- TODO: [🧱] Implement in a functional (not new Class) way
7495
- "https://".concat(options.resourceName, ".openai.azure.com/"), new openai.AzureKeyCredential(
7496
- // <- TODO: [🧱] Implement in a functional (not new Class) way
7497
- options.apiKey));
7530
+ "https://".concat(options.resourceName, ".openai.azure.com/"), new openai.AzureKeyCredential(options.apiKey));
7531
+ // <- TODO: !!!!!! Lazy-load client
7498
7532
  }
7499
7533
  Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
7500
7534
  get: function () {
@@ -7764,6 +7798,9 @@
7764
7798
  output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
7765
7799
  };
7766
7800
  }
7801
+ /**
7802
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
7803
+ */
7767
7804
 
7768
7805
  /**
7769
7806
  * Execution Tools for calling OpenAI API.
@@ -7784,6 +7821,7 @@
7784
7821
  delete openAiOptions.isVerbose;
7785
7822
  delete openAiOptions.user;
7786
7823
  this.client = new OpenAI__default["default"](__assign({}, openAiOptions));
7824
+ // <- TODO: !!!!!! Lazy-load client
7787
7825
  }
7788
7826
  Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
7789
7827
  get: function () {
@@ -8085,7 +8123,7 @@
8085
8123
  // <- Note: [🦑] Add here new LLM provider
8086
8124
  };
8087
8125
  /**
8088
- * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
8126
+ * TODO: !!!!!!! Make global register for this
8089
8127
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
8090
8128
  */
8091
8129