@promptbook/cli 0.65.0 → 0.66.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/README.md +4 -1
  2. package/esm/index.es.js +137 -61
  3. package/esm/index.es.js.map +1 -1
  4. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  6. package/esm/typings/src/_packages/utils.index.d.ts +10 -8
  7. package/esm/typings/src/config.d.ts +22 -0
  8. package/esm/typings/src/execution/LlmExecutionTools.d.ts +11 -5
  9. package/esm/typings/src/llm-providers/_common/config.d.ts +1 -6
  10. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +2 -2
  11. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +18 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +4 -0
  13. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +4 -0
  14. package/esm/typings/src/llm-providers/mocked/fakeTextToExpectations.d.ts +1 -0
  15. package/esm/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +1 -1
  16. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.d.ts +5 -1
  17. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +3 -0
  18. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +2 -1
  19. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +4 -1
  20. package/esm/typings/src/utils/currentDate.d.ts +2 -0
  21. package/esm/typings/src/utils/deepFreeze.d.ts +2 -1
  22. package/esm/typings/src/utils/environment/getGlobalScope.d.ts +12 -0
  23. package/esm/typings/src/utils/environment/isRunningInBrowser.d.ts +8 -0
  24. package/esm/typings/src/utils/environment/isRunningInNode.d.ts +8 -0
  25. package/esm/typings/src/utils/environment/isRunningInWebWorker.d.ts +8 -0
  26. package/esm/typings/src/utils/files/isDirectoryExisting.d.ts +3 -1
  27. package/esm/typings/src/utils/files/isFileExisting.d.ts +3 -1
  28. package/esm/typings/src/utils/files/listAllFiles.d.ts +3 -1
  29. package/esm/typings/src/utils/random/randomSeed.d.ts +1 -0
  30. package/package.json +3 -3
  31. package/umd/index.umd.js +137 -61
  32. package/umd/index.umd.js.map +1 -1
  33. package/esm/typings/src/utils/isRunningInWhatever.d.ts +0 -18
package/README.md CHANGED
@@ -63,7 +63,10 @@ const promptbook = await getPipelineCollection().getPipelineByUrl(
63
63
  // ▶ Prepare tools
64
64
  const tools = {
65
65
  llm: createLlmToolsFromEnv(),
66
- script: [new JavascriptExecutionTools()],
66
+ script: [
67
+ new JavascriptExecutionTools(),
68
+ // <- TODO: [🧱] Implement in a functional (not new Class) way
69
+ ],
67
70
  };
68
71
 
69
72
  // ▶ Create executor - the function that will execute the Pipeline
package/esm/index.es.js CHANGED
@@ -20,7 +20,7 @@ import glob from 'glob-promise';
20
20
  /**
21
21
  * The version of the Promptbook library
22
22
  */
23
- var PROMPTBOOK_VERSION = '0.65.0-7';
23
+ var PROMPTBOOK_VERSION = '0.66.0-0';
24
24
  // TODO: !!!! List here all the versions and annotate + put into script
25
25
 
26
26
  /*! *****************************************************************************
@@ -158,34 +158,25 @@ var EnvironmentMismatchError = /** @class */ (function (_super) {
158
158
  return EnvironmentMismatchError;
159
159
  }(Error));
160
160
 
161
- /**
162
- * Detects if the code is running in a browser environment in main thread (Not in a web worker)
163
- *
164
- * @public exported from `@promptbook/utils`
165
- */
166
- new Function("\n try {\n return this === window;\n } catch (e) {\n return false;\n }\n");
167
161
  /**
168
162
  * Detects if the code is running in a Node.js environment
169
163
  *
170
- * @public exported from `@promptbook/utils`
171
- */
172
- var isRunningInNode = new Function("\n try {\n return this === global;\n } catch (e) {\n return false;\n }\n");
173
- /**
174
- * Detects if the code is running in a web worker
164
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
175
165
  *
176
166
  * @public exported from `@promptbook/utils`
177
167
  */
178
- new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {\n return true;\n } else {\n return false;\n }\n } catch (e) {\n return false;\n }\n");
168
+ var $isRunningInNode = new Function("\n try {\n return this === global;\n } catch (e) {\n return false;\n }\n");
179
169
 
180
170
  /**
181
171
  * @@@
182
172
  *
173
+ * Note: `$` is used to indicate that this function is not a pure function - it mutates given object
183
174
  * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
184
175
  *
185
176
  * @returns The same object as the input, but deeply frozen
186
177
  * @public exported from `@promptbook/utils`
187
178
  */
188
- function deepFreeze(objectValue) {
179
+ function $deepFreeze(objectValue) {
189
180
  var e_1, _a;
190
181
  var propertyNames = Object.getOwnPropertyNames(objectValue);
191
182
  try {
@@ -193,7 +184,7 @@ function deepFreeze(objectValue) {
193
184
  var propertyName = propertyNames_1_1.value;
194
185
  var value = objectValue[propertyName];
195
186
  if (value && typeof value === 'object') {
196
- deepFreeze(value);
187
+ $deepFreeze(value);
197
188
  }
198
189
  }
199
190
  }
@@ -216,7 +207,7 @@ function deepFreeze(objectValue) {
216
207
  * @private this is in comparison to `deepFreeze` a more specific utility and maybe not very good practice to use without specific reason and considerations
217
208
  */
218
209
  function deepFreezeWithSameType(objectValue) {
219
- return deepFreeze(objectValue);
210
+ return $deepFreeze(objectValue);
220
211
  }
221
212
  /**
222
213
  * TODO: [🧠] Is there a way how to meaningfully test this utility
@@ -290,7 +281,7 @@ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
290
281
  *
291
282
  * @public exported from `@promptbook/core`
292
283
  */
293
- var RESERVED_PARAMETER_NAMES = deepFreeze([
284
+ var RESERVED_PARAMETER_NAMES = $deepFreeze([
294
285
  'content',
295
286
  'context',
296
287
  'knowledge',
@@ -312,6 +303,9 @@ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
312
303
  * @private within the repository
313
304
  */
314
305
  var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
306
+ /**
307
+ * TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
308
+ */
315
309
 
316
310
  /**
317
311
  * Initializes `about` command for Promptbook CLI utilities
@@ -673,7 +667,7 @@ function deepClone(objectValue) {
673
667
  *
674
668
  * @public exported from `@promptbook/core`
675
669
  */
676
- var ZERO_USAGE = deepFreeze({
670
+ var ZERO_USAGE = $deepFreeze({
677
671
  price: { value: 0 },
678
672
  input: {
679
673
  tokensCount: { value: 0 },
@@ -841,7 +835,7 @@ function forEachAsync(array, options, callbackfunction) {
841
835
  });
842
836
  }
843
837
 
844
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0-7",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0-7",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
838
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
845
839
 
846
840
  /**
847
841
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2946,20 +2940,20 @@ function createPipelineExecutor(options) {
2946
2940
  case 'EMBEDDING': return [3 /*break*/, 12];
2947
2941
  }
2948
2942
  return [3 /*break*/, 14];
2949
- case 8: return [4 /*yield*/, llmTools.callChatModel(deepFreeze(prompt))];
2943
+ case 8: return [4 /*yield*/, llmTools.callChatModel($deepFreeze(prompt))];
2950
2944
  case 9:
2951
2945
  chatResult = _u.sent();
2952
2946
  // TODO: [🍬] Destroy chatThread
2953
2947
  result = chatResult;
2954
2948
  resultString = chatResult.content;
2955
2949
  return [3 /*break*/, 15];
2956
- case 10: return [4 /*yield*/, llmTools.callCompletionModel(deepFreeze(prompt))];
2950
+ case 10: return [4 /*yield*/, llmTools.callCompletionModel($deepFreeze(prompt))];
2957
2951
  case 11:
2958
2952
  completionResult = _u.sent();
2959
2953
  result = completionResult;
2960
2954
  resultString = completionResult.content;
2961
2955
  return [3 /*break*/, 15];
2962
- case 12: return [4 /*yield*/, llmTools.callEmbeddingModel(deepFreeze(prompt))];
2956
+ case 12: return [4 /*yield*/, llmTools.callEmbeddingModel($deepFreeze(prompt))];
2963
2957
  case 13:
2964
2958
  embeddingResult = _u.sent();
2965
2959
  result = embeddingResult;
@@ -2987,7 +2981,7 @@ function createPipelineExecutor(options) {
2987
2981
  _u.label = 19;
2988
2982
  case 19:
2989
2983
  _u.trys.push([19, 21, , 22]);
2990
- return [4 /*yield*/, scriptTools.execute(deepFreeze({
2984
+ return [4 /*yield*/, scriptTools.execute($deepFreeze({
2991
2985
  scriptLanguage: currentTemplate.contentLanguage,
2992
2986
  script: preparedContent,
2993
2987
  parameters: parameters,
@@ -3035,7 +3029,7 @@ function createPipelineExecutor(options) {
3035
3029
  if (tools.userInterface === undefined) {
3036
3030
  throw new PipelineExecutionError('User interface tools are not available');
3037
3031
  }
3038
- return [4 /*yield*/, tools.userInterface.promptDialog(deepFreeze({
3032
+ return [4 /*yield*/, tools.userInterface.promptDialog($deepFreeze({
3039
3033
  promptTitle: currentTemplate.title,
3040
3034
  promptMessage: replaceParameters(currentTemplate.description || '', parameters),
3041
3035
  defaultValue: replaceParameters(preparedContent, parameters),
@@ -3830,7 +3824,9 @@ function prepareTemplates(pipeline, options) {
3830
3824
  promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
3831
3825
  // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
3832
3826
  TODO_USE(parameters);
3833
- promptTemplatesPrepared = new Array(promptTemplates.length);
3827
+ promptTemplatesPrepared = new Array(
3828
+ // <- TODO: [🧱] Implement in a functional (not new Class) way
3829
+ promptTemplates.length);
3834
3830
  return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
3835
3831
  var dependentParameterNames, preparedContent, preparedTemplate;
3836
3832
  return __generator(this, function (_a) {
@@ -3903,7 +3899,9 @@ function preparePipeline(pipeline, options) {
3903
3899
  // <- TODO: [🧊]
3904
3900
  currentPreparation,
3905
3901
  ];
3906
- preparedPersonas = new Array(personas.length);
3902
+ preparedPersonas = new Array(
3903
+ // <- TODO: [🧱] Implement in a functional (not new Class) way
3904
+ personas.length);
3907
3905
  return [4 /*yield*/, forEachAsync(personas, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (persona, index) { return __awaiter(_this, void 0, void 0, function () {
3908
3906
  var modelRequirements, preparedPersona;
3909
3907
  return __generator(this, function (_a) {
@@ -5983,9 +5981,11 @@ var CollectionError = /** @class */ (function (_super) {
5983
5981
  /**
5984
5982
  * Checks if the file exists
5985
5983
  *
5984
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the filesystem
5985
+ *
5986
5986
  * @private within the repository
5987
5987
  */
5988
- function isFileExisting(filePath) {
5988
+ function $isFileExisting(filePath) {
5989
5989
  return __awaiter(this, void 0, void 0, function () {
5990
5990
  var isReadAccessAllowed, isFile;
5991
5991
  return __generator(this, function (_a) {
@@ -6017,9 +6017,11 @@ function isFileExisting(filePath) {
6017
6017
  /**
6018
6018
  * Checks if the directory exists
6019
6019
  *
6020
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the filesystem
6021
+ *
6020
6022
  * @private within the repository
6021
6023
  */
6022
- function isDirectoryExisting(directoryPath) {
6024
+ function $isDirectoryExisting(directoryPath) {
6023
6025
  return __awaiter(this, void 0, void 0, function () {
6024
6026
  var isReadAccessAllowed, isDirectory;
6025
6027
  return __generator(this, function (_a) {
@@ -6052,18 +6054,20 @@ function isDirectoryExisting(directoryPath) {
6052
6054
  /**
6053
6055
  * Reads all files in the directory
6054
6056
  *
6057
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the filesystem
6058
+ *
6055
6059
  * @param path
6056
6060
  * @param isRecursive
6057
6061
  * @returns List of all files in the directory
6058
6062
  * @private internal function of `createCollectionFromDirectory`
6059
6063
  */
6060
- function listAllFiles(path, isRecursive) {
6064
+ function $listAllFiles(path, isRecursive) {
6061
6065
  return __awaiter(this, void 0, void 0, function () {
6062
6066
  var dirents, fileNames, _a, _b, dirent, subPath, _c, _d, _e, _f, e_1_1;
6063
6067
  var e_1, _g;
6064
6068
  return __generator(this, function (_h) {
6065
6069
  switch (_h.label) {
6066
- case 0: return [4 /*yield*/, isDirectoryExisting(path)];
6070
+ case 0: return [4 /*yield*/, $isDirectoryExisting(path)];
6067
6071
  case 1:
6068
6072
  if (!(_h.sent())) {
6069
6073
  throw new Error("Directory \"".concat(path, "\" does not exist or is not readable"));
@@ -6091,7 +6095,7 @@ function listAllFiles(path, isRecursive) {
6091
6095
  _d = (_c = fileNames.push).apply;
6092
6096
  _e = [fileNames];
6093
6097
  _f = [[]];
6094
- return [4 /*yield*/, listAllFiles(subPath, isRecursive)];
6098
+ return [4 /*yield*/, $listAllFiles(subPath, isRecursive)];
6095
6099
  case 5:
6096
6100
  _d.apply(_c, _e.concat([__spreadArray.apply(void 0, _f.concat([__read.apply(void 0, [(_h.sent())]), false]))]));
6097
6101
  _h.label = 6;
@@ -6223,11 +6227,11 @@ function createCollectionFromDirectory(path, options) {
6223
6227
  return __generator(this, function (_f) {
6224
6228
  switch (_f.label) {
6225
6229
  case 0:
6226
- if (!isRunningInNode()) {
6230
+ if (!$isRunningInNode()) {
6227
6231
  throw new Error('Function `createCollectionFromDirectory` can only be run in Node.js environment because it reads the file system.');
6228
6232
  }
6229
6233
  makedLibraryFilePath = join$1(path, "".concat(PIPELINE_COLLECTION_BASE_FILENAME, ".json"));
6230
- return [4 /*yield*/, isFileExisting(makedLibraryFilePath)];
6234
+ return [4 /*yield*/, $isFileExisting(makedLibraryFilePath)];
6231
6235
  case 1:
6232
6236
  if (!(_f.sent())) {
6233
6237
  console.info(colors.yellow("Tip: Prebuild your pipeline collection (file with supposed prebuild ".concat(makedLibraryFilePath, " not found) with CLI util \"ptbk make\" to speed up the collection creation.")));
@@ -6247,7 +6251,7 @@ function createCollectionFromDirectory(path, options) {
6247
6251
  if (isVerbose) {
6248
6252
  console.info(colors.cyan("Creating pipeline collection from path ".concat(path.split('\\').join('/'))));
6249
6253
  }
6250
- return [4 /*yield*/, listAllFiles(path, isRecursive)];
6254
+ return [4 /*yield*/, $listAllFiles(path, isRecursive)];
6251
6255
  case 1:
6252
6256
  fileNames = _b.sent();
6253
6257
  // Note: First load all .ptbk.json and then .ptbk.md files
@@ -6482,7 +6486,7 @@ function nameToSubfolderPath(name) {
6482
6486
  var FilesStorage = /** @class */ (function () {
6483
6487
  function FilesStorage(options) {
6484
6488
  this.options = options;
6485
- if (!isRunningInNode()) {
6489
+ if (!$isRunningInNode()) {
6486
6490
  throw new EnvironmentMismatchError("FilesStorage works only in Node.js environment");
6487
6491
  }
6488
6492
  }
@@ -6505,7 +6509,7 @@ var FilesStorage = /** @class */ (function () {
6505
6509
  switch (_a.label) {
6506
6510
  case 0:
6507
6511
  filename = this.getFilenameForKey(key);
6508
- return [4 /*yield*/, isFileExisting(filename)];
6512
+ return [4 /*yield*/, $isFileExisting(filename)];
6509
6513
  case 1:
6510
6514
  if (!(_a.sent())) {
6511
6515
  return [2 /*return*/, null];
@@ -6602,7 +6606,9 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
6602
6606
  */
6603
6607
  RemoteLlmExecutionTools.prototype.makeConnection = function () {
6604
6608
  var _this = this;
6605
- return new Promise(function (resolve, reject) {
6609
+ return new Promise(
6610
+ // <- TODO: [🧱] Implement in a functional (not new Class) way
6611
+ function (resolve, reject) {
6606
6612
  var socket = io(_this.options.remoteUrl, {
6607
6613
  path: _this.options.path,
6608
6614
  // path: `${this.remoteUrl.pathname}/socket.io`,
@@ -6724,12 +6730,21 @@ function computeUsage(value) {
6724
6730
  /**
6725
6731
  * List of available Anthropic Claude models with pricing
6726
6732
  *
6727
- * Note: Done at 2024-05-25
6733
+ * Note: Done at 2024-08-16
6728
6734
  *
6729
6735
  * @see https://docs.anthropic.com/en/docs/models-overview
6730
6736
  * @public exported from `@promptbook/anthropic-claude`
6731
6737
  */
6732
6738
  var ANTHROPIC_CLAUDE_MODELS = [
6739
+ {
6740
+ modelVariant: 'CHAT',
6741
+ modelTitle: 'Claude 3.5 Sonnet',
6742
+ modelName: 'claude-3-5-sonnet-20240620',
6743
+ pricing: {
6744
+ prompt: computeUsage("$3.00 / 1M tokens"),
6745
+ output: computeUsage("$15.00 / 1M tokens"),
6746
+ },
6747
+ },
6733
6748
  {
6734
6749
  modelVariant: 'CHAT',
6735
6750
  modelTitle: 'Claude 3 Opus',
@@ -6791,9 +6806,18 @@ var ANTHROPIC_CLAUDE_MODELS = [
6791
6806
  * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
6792
6807
  * TODO: [🧠] Some mechanism to propagate unsureness
6793
6808
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
6794
- * TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
6809
+ * TODO: [🎰] Some mechanism to auto-update available models
6795
6810
  */
6796
6811
 
6812
+ /**
6813
+ * Get current date in ISO 8601 format
6814
+ *
6815
+ * @private internal utility
6816
+ */
6817
+ function getCurrentIsoDate() {
6818
+ return new Date().toISOString();
6819
+ }
6820
+
6797
6821
  /**
6798
6822
  * Helper of usage compute
6799
6823
  *
@@ -6828,13 +6852,42 @@ function uncertainNumber(value) {
6828
6852
  }
6829
6853
 
6830
6854
  /**
6831
- * Get current date in ISO 8601 format
6855
+ * Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
6832
6856
  *
6833
- * @private internal utility
6857
+ * @param promptContent The content of the prompt
6858
+ * @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
6859
+ * @param rawResponse The raw response from Anthropic Claude API
6860
+ * @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
6861
+ * @private internal utility of `AnthropicClaudeExecutionTools`
6834
6862
  */
6835
- function getCurrentIsoDate() {
6836
- return new Date().toISOString();
6863
+ function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
6864
+ resultContent, rawResponse) {
6865
+ var _a, _b;
6866
+ if (rawResponse.usage === undefined) {
6867
+ throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
6868
+ }
6869
+ if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
6870
+ throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
6871
+ }
6872
+ var inputTokens = rawResponse.usage.input_tokens;
6873
+ var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
6874
+ var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
6875
+ var price;
6876
+ if (modelInfo === undefined || modelInfo.pricing === undefined) {
6877
+ price = uncertainNumber();
6878
+ }
6879
+ else {
6880
+ price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
6881
+ }
6882
+ return {
6883
+ price: price,
6884
+ input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
6885
+ output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
6886
+ };
6837
6887
  }
6888
+ /**
6889
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
6890
+ */
6838
6891
 
6839
6892
  /**
6840
6893
  * Execution Tools for calling Anthropic Claude API.
@@ -6856,6 +6909,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6856
6909
  delete anthropicOptions.isVerbose;
6857
6910
  delete anthropicOptions.isProxied;
6858
6911
  this.client = new Anthropic(anthropicOptions);
6912
+ // <- TODO: !!!!!! Lazy-load client
6859
6913
  }
6860
6914
  Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
6861
6915
  get: function () {
@@ -6876,7 +6930,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6876
6930
  */
6877
6931
  AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
6878
6932
  return __awaiter(this, void 0, void 0, function () {
6879
- var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
6933
+ var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
6880
6934
  return __generator(this, function (_a) {
6881
6935
  switch (_a.label) {
6882
6936
  case 0:
@@ -6922,14 +6976,14 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
6922
6976
  if (rawResponse.content.length > 1) {
6923
6977
  throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
6924
6978
  }
6925
- resultContent = rawResponse.content[0].text;
6979
+ contentBlock = rawResponse.content[0];
6980
+ if (contentBlock.type !== 'text') {
6981
+ throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
6982
+ }
6983
+ resultContent = contentBlock.text;
6926
6984
  // eslint-disable-next-line prefer-const
6927
6985
  complete = getCurrentIsoDate();
6928
- usage = {
6929
- price: { value: 0, isUncertain: true } /* <- TODO: [🐞] Compute usage */,
6930
- input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
6931
- output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
6932
- };
6986
+ usage = computeAnthropicClaudeUsage(content, '', rawResponse);
6933
6987
  return [2 /*return*/, {
6934
6988
  content: resultContent,
6935
6989
  modelName: rawResponse.model,
@@ -7077,7 +7131,9 @@ function createAnthropicClaudeExecutionTools(options) {
7077
7131
  },
7078
7132
  ], models: ANTHROPIC_CLAUDE_MODELS }));
7079
7133
  }
7080
- return new AnthropicClaudeExecutionTools(options);
7134
+ return new AnthropicClaudeExecutionTools(
7135
+ // <- TODO: [🧱] Implement in a functional (not new Class) way
7136
+ options);
7081
7137
  }
7082
7138
  /**
7083
7139
  * TODO: [🧠] !!!! Make anonymous this with all LLM providers
@@ -7426,7 +7482,8 @@ var OPENAI_MODELS = [
7426
7482
  /**
7427
7483
  * Note: [🤖] Add models of new variant
7428
7484
  * TODO: [🧠] Some mechanism to propagate unsureness
7429
- * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
7485
+ * TODO: [🎰] Some mechanism to auto-update available models
7486
+ * TODO: [🎰][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
7430
7487
  * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
7431
7488
  * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
7432
7489
  * @see https://openai.com/api/pricing/
@@ -7449,7 +7506,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
7449
7506
  */
7450
7507
  function AzureOpenAiExecutionTools(options) {
7451
7508
  this.options = options;
7452
- this.client = new OpenAIClient("https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
7509
+ this.client = new OpenAIClient(
7510
+ // <- TODO: [🧱] Implement in a functional (not new Class) way
7511
+ "https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
7512
+ // <- TODO: !!!!!! Lazy-load client
7453
7513
  }
7454
7514
  Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
7455
7515
  get: function () {
@@ -7719,6 +7779,9 @@ resultContent, rawResponse) {
7719
7779
  output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
7720
7780
  };
7721
7781
  }
7782
+ /**
7783
+ * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
7784
+ */
7722
7785
 
7723
7786
  /**
7724
7787
  * Execution Tools for calling OpenAI API.
@@ -7739,6 +7802,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
7739
7802
  delete openAiOptions.isVerbose;
7740
7803
  delete openAiOptions.user;
7741
7804
  this.client = new OpenAI(__assign({}, openAiOptions));
7805
+ // <- TODO: !!!!!! Lazy-load client
7742
7806
  }
7743
7807
  Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
7744
7808
  get: function () {
@@ -8032,11 +8096,15 @@ var EXECUTION_TOOLS_CLASSES = {
8032
8096
  return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
8033
8097
  },
8034
8098
  createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
8035
- createAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
8099
+ createAzureOpenAiExecutionTools: function (options) {
8100
+ return new AzureOpenAiExecutionTools(
8101
+ // <- TODO: [🧱] Implement in a functional (not new Class) way
8102
+ options);
8103
+ },
8036
8104
  // <- Note: [🦑] Add here new LLM provider
8037
8105
  };
8038
8106
  /**
8039
- * TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
8107
+ * TODO: !!!!!!! Make global register for this
8040
8108
  * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
8041
8109
  */
8042
8110
 
@@ -8078,7 +8146,7 @@ function createLlmToolsFromConfiguration(configuration, options) {
8078
8146
  * @public exported from `@promptbook/node`
8079
8147
  */
8080
8148
  function createLlmToolsFromConfigurationFromEnv() {
8081
- if (!isRunningInNode()) {
8149
+ if (!$isRunningInNode()) {
8082
8150
  throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
8083
8151
  }
8084
8152
  dotenv.config();
@@ -8132,7 +8200,7 @@ function createLlmToolsFromConfigurationFromEnv() {
8132
8200
  */
8133
8201
  function createLlmToolsFromEnv(options) {
8134
8202
  if (options === void 0) { options = {}; }
8135
- if (!isRunningInNode()) {
8203
+ if (!$isRunningInNode()) {
8136
8204
  throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
8137
8205
  }
8138
8206
  var configuration = createLlmToolsFromConfigurationFromEnv();
@@ -8206,6 +8274,8 @@ var MemoryStorage = /** @class */ (function () {
8206
8274
  /**
8207
8275
  * Simple wrapper `new Date().toISOString()`
8208
8276
  *
8277
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic because it depends on the current time
8278
+ *
8209
8279
  * @returns string_date branded type
8210
8280
  * @public exported from `@promptbook/utils`
8211
8281
  */
@@ -8225,7 +8295,11 @@ function $currentDate() {
8225
8295
  function cacheLlmTools(llmTools, options) {
8226
8296
  var _this = this;
8227
8297
  if (options === void 0) { options = {}; }
8228
- var _a = options.storage, storage = _a === void 0 ? new MemoryStorage() : _a, _b = options.isReloaded, isReloaded = _b === void 0 ? false : _b;
8298
+ var _a = options.storage, storage = _a === void 0 ? new MemoryStorage() : _a,
8299
+ // <- TODO: [🧱] Implement in a functional (not new Class) way
8300
+ _b = options.isReloaded,
8301
+ // <- TODO: [🧱] Implement in a functional (not new Class) way
8302
+ isReloaded = _b === void 0 ? false : _b;
8229
8303
  var proxyTools = __assign(__assign({}, llmTools), {
8230
8304
  // <- Note: [🥫]
8231
8305
  get title() {
@@ -8330,14 +8404,16 @@ function cacheLlmTools(llmTools, options) {
8330
8404
  * @private within the repository - for CLI utils
8331
8405
  */
8332
8406
  function getLlmToolsForCli(options) {
8333
- if (!isRunningInNode()) {
8407
+ if (!$isRunningInNode()) {
8334
8408
  throw new EnvironmentMismatchError('Function `getLlmToolsForTestingAndScriptsAndPlayground` works only in Node.js environment');
8335
8409
  }
8336
8410
  var _a = (options !== null && options !== void 0 ? options : {}).isCacheReloaded, isCacheReloaded = _a === void 0 ? false : _a;
8337
8411
  return cacheLlmTools(countTotalUsage(
8338
8412
  // <- Note: for example here we don`t want the [🌯]
8339
8413
  createLlmToolsFromEnv()), {
8340
- storage: new FilesStorage({ cacheFolderPath: join$1(process.cwd(), EXECUTIONS_CACHE_DIRNAME) }),
8414
+ storage: new FilesStorage(
8415
+ // <- TODO: [🧱] Implement in a functional (not new Class) way
8416
+ { cacheFolderPath: join$1(process.cwd(), EXECUTIONS_CACHE_DIRNAME) }),
8341
8417
  isReloaded: isCacheReloaded,
8342
8418
  });
8343
8419
  }
@@ -8830,7 +8906,7 @@ function promptbookCli() {
8830
8906
  return __awaiter(this, void 0, void 0, function () {
8831
8907
  var program;
8832
8908
  return __generator(this, function (_a) {
8833
- if (!isRunningInNode()) {
8909
+ if (!$isRunningInNode()) {
8834
8910
  throw new EnvironmentMismatchError(spaceTrim$1("\n Function promptbookCli is initiator of CLI script and should be run in Node.js environment.\n\n - In browser use function exported from `@promptbook/utils` or `@promptbook/core` directly, for example `prettifyPipelineString`.\n\n "));
8835
8911
  }
8836
8912
  program = new commander.Command();