@promptbook/node 0.65.0 → 0.66.0-1

This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
Files changed (32)
  1. package/esm/index.es.js +132 -64
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/anthropic-claude.index.d.ts +2 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +6 -2
  5. package/esm/typings/src/_packages/utils.index.d.ts +10 -8
  6. package/esm/typings/src/config.d.ts +22 -0
  7. package/esm/typings/src/execution/LlmExecutionTools.d.ts +11 -5
  8. package/esm/typings/src/llm-providers/_common/config.d.ts +1 -6
  9. package/esm/typings/src/llm-providers/anthropic-claude/anthropic-claude-models.d.ts +2 -2
  10. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.d.ts +18 -0
  11. package/esm/typings/src/llm-providers/anthropic-claude/computeAnthropicClaudeUsage.test.d.ts +4 -0
  12. package/esm/typings/src/llm-providers/anthropic-claude/register1.d.ts +4 -0
  13. package/esm/typings/src/llm-providers/mocked/fakeTextToExpectations.d.ts +1 -0
  14. package/esm/typings/src/llm-providers/multiple/joinLlmExecutionTools.d.ts +1 -1
  15. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.d.ts +5 -1
  16. package/esm/typings/src/llm-providers/openai/computeOpenaiUsage.test.d.ts +3 -0
  17. package/esm/typings/src/llm-providers/openai/openai-models.d.ts +2 -1
  18. package/esm/typings/src/llm-providers/remote/interfaces/RemoteLlmExecutionToolsOptions.d.ts +4 -1
  19. package/esm/typings/src/utils/currentDate.d.ts +2 -0
  20. package/esm/typings/src/utils/deepFreeze.d.ts +2 -1
  21. package/esm/typings/src/utils/environment/getGlobalScope.d.ts +12 -0
  22. package/esm/typings/src/utils/environment/isRunningInBrowser.d.ts +8 -0
  23. package/esm/typings/src/utils/environment/isRunningInNode.d.ts +8 -0
  24. package/esm/typings/src/utils/environment/isRunningInWebWorker.d.ts +8 -0
  25. package/esm/typings/src/utils/files/isDirectoryExisting.d.ts +3 -1
  26. package/esm/typings/src/utils/files/isFileExisting.d.ts +3 -1
  27. package/esm/typings/src/utils/files/listAllFiles.d.ts +3 -1
  28. package/esm/typings/src/utils/random/randomSeed.d.ts +1 -0
  29. package/package.json +4 -4
  30. package/umd/index.umd.js +132 -64
  31. package/umd/index.umd.js.map +1 -1
  32. package/esm/typings/src/utils/isRunningInWhatever.d.ts +0 -18
package/esm/index.es.js CHANGED
@@ -17,7 +17,7 @@ import OpenAI from 'openai';
 /**
 * The version of the Promptbook library
 */
-var PROMPTBOOK_VERSION = '0.65.0-7';
+var PROMPTBOOK_VERSION = '0.66.0-0';
 // TODO: !!!! List here all the versions and annotate + put into script
 
 /*! *****************************************************************************
@@ -142,12 +142,13 @@ function __spreadArray(to, from, pack) {
 /**
 * @@@
 *
+* Note: `$` is used to indicate that this function is not a pure function - it mutates given object
 * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
 *
 * @returns The same object as the input, but deeply frozen
 * @public exported from `@promptbook/utils`
 */
-function deepFreeze(objectValue) {
+function $deepFreeze(objectValue) {
 var e_1, _a;
 var propertyNames = Object.getOwnPropertyNames(objectValue);
 try {
@@ -155,7 +156,7 @@ function deepFreeze(objectValue) {
 var propertyName = propertyNames_1_1.value;
 var value = objectValue[propertyName];
 if (value && typeof value === 'object') {
-deepFreeze(value);
+$deepFreeze(value);
 }
 }
 }
@@ -178,7 +179,7 @@ function deepFreeze(objectValue) {
 * @private this is in comparison to `deepFreeze` a more specific utility and maybe not very good practice to use without specific reason and considerations
 */
 function deepFreezeWithSameType(objectValue) {
-return deepFreeze(objectValue);
+return $deepFreeze(objectValue);
 }
 /**
 * TODO: [🧠] Is there a way how to meaningfully test this utility
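The rename follows a convention introduced in this release: impure utilities get a `$` prefix. From the caller's side the change is mechanical; a minimal sketch (the object literal here is illustrative, not from the package):

```ts
import { $deepFreeze } from '@promptbook/utils';

// `$deepFreeze` mutates its argument in place, freezing it and every nested
// object, and returns the very same reference.
const usage = $deepFreeze({ price: { value: 0 }, input: { tokensCount: { value: 0 } } });

// usage.price.value = 1; // <- would throw a TypeError in strict mode; the freeze is deep
```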
@@ -226,7 +227,7 @@ var REPLACING_NONCE = 'u$k42k%!V2zo34w7Fu#@QUHYPW';
 *
 * @public exported from `@promptbook/core`
 */
-var RESERVED_PARAMETER_NAMES = deepFreeze([
+var RESERVED_PARAMETER_NAMES = $deepFreeze([
 'content',
 'context',
 'knowledge',
@@ -248,6 +249,9 @@ var RESERVED_PARAMETER_MISSING_VALUE = 'MISSING-' + REPLACING_NONCE;
 * @private within the repository
 */
 var RESERVED_PARAMETER_RESTRICTED = 'RESTRICTED-' + REPLACING_NONCE;
+/**
+* TODO: [🧠][🧜‍♂️] Maybe join remoteUrl and path into single value
+*/
 
 /**
 * Prettify the html code
@@ -524,7 +528,7 @@ function deepClone(objectValue) {
 *
 * @public exported from `@promptbook/core`
 */
-var ZERO_USAGE = deepFreeze({
+var ZERO_USAGE = $deepFreeze({
 price: { value: 0 },
 input: {
 tokensCount: { value: 0 },
@@ -692,7 +696,7 @@ function forEachAsync(array, options, callbackfunction) {
 });
 }
 
-var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.65.0-7",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.65.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.65.0-7",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.65.0-7",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
+var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> {knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
 
 /**
 * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -2797,20 +2801,20 @@ function createPipelineExecutor(options) {
 case 'EMBEDDING': return [3 /*break*/, 12];
 }
 return [3 /*break*/, 14];
-case 8: return [4 /*yield*/, llmTools.callChatModel(deepFreeze(prompt))];
+case 8: return [4 /*yield*/, llmTools.callChatModel($deepFreeze(prompt))];
 case 9:
 chatResult = _u.sent();
 // TODO: [🍬] Destroy chatThread
 result = chatResult;
 resultString = chatResult.content;
 return [3 /*break*/, 15];
-case 10: return [4 /*yield*/, llmTools.callCompletionModel(deepFreeze(prompt))];
+case 10: return [4 /*yield*/, llmTools.callCompletionModel($deepFreeze(prompt))];
 case 11:
 completionResult = _u.sent();
 result = completionResult;
 resultString = completionResult.content;
 return [3 /*break*/, 15];
-case 12: return [4 /*yield*/, llmTools.callEmbeddingModel(deepFreeze(prompt))];
+case 12: return [4 /*yield*/, llmTools.callEmbeddingModel($deepFreeze(prompt))];
 case 13:
 embeddingResult = _u.sent();
 result = embeddingResult;
@@ -2838,7 +2842,7 @@ function createPipelineExecutor(options) {
 _u.label = 19;
 case 19:
 _u.trys.push([19, 21, , 22]);
-return [4 /*yield*/, scriptTools.execute(deepFreeze({
+return [4 /*yield*/, scriptTools.execute($deepFreeze({
 scriptLanguage: currentTemplate.contentLanguage,
 script: preparedContent,
 parameters: parameters,
@@ -2886,7 +2890,7 @@ function createPipelineExecutor(options) {
 if (tools.userInterface === undefined) {
 throw new PipelineExecutionError('User interface tools are not available');
 }
-return [4 /*yield*/, tools.userInterface.promptDialog(deepFreeze({
+return [4 /*yield*/, tools.userInterface.promptDialog($deepFreeze({
 promptTitle: currentTemplate.title,
 promptMessage: replaceParameters(currentTemplate.description || '', parameters),
 defaultValue: replaceParameters(preparedContent, parameters),
@@ -3681,7 +3685,9 @@ function prepareTemplates(pipeline, options) {
 promptTemplates = pipeline.promptTemplates, parameters = pipeline.parameters, knowledgePiecesCount = pipeline.knowledgePiecesCount;
 // TODO: !!!!! Apply samples to each template (if missing and is for the template defined)
 TODO_USE(parameters);
-promptTemplatesPrepared = new Array(promptTemplates.length);
+promptTemplatesPrepared = new Array(
+// <- TODO: [🧱] Implement in a functional (not new Class) way
+promptTemplates.length);
 return [4 /*yield*/, forEachAsync(promptTemplates, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (template, index) { return __awaiter(_this, void 0, void 0, function () {
 var dependentParameterNames, preparedContent, preparedTemplate;
 return __generator(this, function (_a) {
@@ -3754,7 +3760,9 @@ function preparePipeline(pipeline, options) {
 // <- TODO: [🧊]
 currentPreparation,
 ];
-preparedPersonas = new Array(personas.length);
+preparedPersonas = new Array(
+// <- TODO: [🧱] Implement in a functional (not new Class) way
+personas.length);
 return [4 /*yield*/, forEachAsync(personas, { maxParallelCount: maxParallelCount /* <- TODO: [🪂] When there are subtasks, this maximul limit can be broken */ }, function (persona, index) { return __awaiter(_this, void 0, void 0, function () {
 var modelRequirements, preparedPersona;
 return __generator(this, function (_a) {
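The [🧱] TODO in both hunks asks for a functional construction instead of `new Array(...)`; one possible shape (an assumption, not code from this release):

```ts
declare const promptTemplates: ReadonlyArray<unknown>;

// Same-length array of empty slots, without the `new` keyword:
const promptTemplatesPrepared = Array.from({ length: promptTemplates.length });
```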
@@ -5831,12 +5839,23 @@ var CollectionError = /** @class */ (function (_super) {
 return CollectionError;
 }(Error));
 
+/**
+* Detects if the code is running in a Node.js environment
+*
+* Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
+*
+* @public exported from `@promptbook/utils`
+*/
+var $isRunningInNode = new Function("\n try {\n return this === global;\n } catch (e) {\n return false;\n }\n");
+
 /**
 * Checks if the file exists
 *
+* Note: `$` is used to indicate that this function is not a pure function - it looks at the filesystem
+*
 * @private within the repository
 */
-function isFileExisting(filePath) {
+function $isFileExisting(filePath) {
 return __awaiter(this, void 0, void 0, function () {
 var isReadAccessAllowed, isFile;
 return __generator(this, function (_a) {
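The new `$isRunningInNode` (and its sibling detectors in the typings listed above) relies on `new Function` bodies running in sloppy mode, where a bare `this` is the global object; a minimal sketch of the trick:

```ts
// `new Function` bodies are not strict, so a bare `this` is the global
// object. `global` exists under Node.js; in a browser the identifier is
// undeclared, so the ReferenceError is caught and `false` is returned.
const isNode = new Function(
    'try { return this === global; } catch (e) { return false; }',
) as () => boolean;

console.log(isNode()); // true under Node.js, false in a browser main thread
```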
@@ -5868,9 +5887,11 @@ function isFileExisting(filePath) {
 /**
 * Checks if the directory exists
 *
+* Note: `$` is used to indicate that this function is not a pure function - it looks at the filesystem
+*
 * @private within the repository
 */
-function isDirectoryExisting(directoryPath) {
+function $isDirectoryExisting(directoryPath) {
 return __awaiter(this, void 0, void 0, function () {
 var isReadAccessAllowed, isDirectory;
 return __generator(this, function (_a) {
@@ -5903,18 +5924,20 @@ function isDirectoryExisting(directoryPath) {
 /**
 * Reads all files in the directory
 *
+* Note: `$` is used to indicate that this function is not a pure function - it looks at the filesystem
+*
 * @param path
 * @param isRecursive
 * @returns List of all files in the directory
 * @private internal function of `createCollectionFromDirectory`
 */
-function listAllFiles(path, isRecursive) {
+function $listAllFiles(path, isRecursive) {
 return __awaiter(this, void 0, void 0, function () {
 var dirents, fileNames, _a, _b, dirent, subPath, _c, _d, _e, _f, e_1_1;
 var e_1, _g;
 return __generator(this, function (_h) {
 switch (_h.label) {
-case 0: return [4 /*yield*/, isDirectoryExisting(path)];
+case 0: return [4 /*yield*/, $isDirectoryExisting(path)];
 case 1:
 if (!(_h.sent())) {
 throw new Error("Directory \"".concat(path, "\" does not exist or is not readable"));
@@ -5942,7 +5965,7 @@ function listAllFiles(path, isRecursive) {
 _d = (_c = fileNames.push).apply;
 _e = [fileNames];
 _f = [[]];
-return [4 /*yield*/, listAllFiles(subPath, isRecursive)];
+return [4 /*yield*/, $listAllFiles(subPath, isRecursive)];
 case 5:
 _d.apply(_c, _e.concat([__spreadArray.apply(void 0, _f.concat([__read.apply(void 0, [(_h.sent())]), false]))]));
 _h.label = 6;
@@ -5970,25 +5993,6 @@
 * TODO: [🖇] What about symlinks?
 */
 
-/**
-* Detects if the code is running in a browser environment in main thread (Not in a web worker)
-*
-* @public exported from `@promptbook/utils`
-*/
-new Function("\n try {\n return this === window;\n } catch (e) {\n return false;\n }\n");
-/**
-* Detects if the code is running in a Node.js environment
-*
-* @public exported from `@promptbook/utils`
-*/
-var isRunningInNode = new Function("\n try {\n return this === global;\n } catch (e) {\n return false;\n }\n");
-/**
-* Detects if the code is running in a web worker
-*
-* @public exported from `@promptbook/utils`
-*/
-new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {\n return true;\n } else {\n return false;\n }\n } catch (e) {\n return false;\n }\n");
-
 
 /**
 * Constructs Promptbook from async sources
 * It can be one of the following:
@@ -6093,11 +6097,11 @@ function createCollectionFromDirectory(path, options) {
 return __generator(this, function (_f) {
 switch (_f.label) {
 case 0:
-if (!isRunningInNode()) {
+if (!$isRunningInNode()) {
 throw new Error('Function `createCollectionFromDirectory` can only be run in Node.js environment because it reads the file system.');
 }
 makedLibraryFilePath = join$1(path, "".concat(PIPELINE_COLLECTION_BASE_FILENAME, ".json"));
-return [4 /*yield*/, isFileExisting(makedLibraryFilePath)];
+return [4 /*yield*/, $isFileExisting(makedLibraryFilePath)];
 case 1:
 if (!(_f.sent())) {
 console.info(colors.yellow("Tip: Prebuild your pipeline collection (file with supposed prebuild ".concat(makedLibraryFilePath, " not found) with CLI util \"ptbk make\" to speed up the collection creation.")));
@@ -6117,7 +6121,7 @@ function createCollectionFromDirectory(path, options) {
 if (isVerbose) {
 console.info(colors.cyan("Creating pipeline collection from path ".concat(path.split('\\').join('/'))));
 }
-return [4 /*yield*/, listAllFiles(path, isRecursive)];
+return [4 /*yield*/, $listAllFiles(path, isRecursive)];
 case 1:
 fileNames = _b.sent();
 // Note: First load all .ptbk.json and then .ptbk.md files
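The renames leave call sites mechanical; for orientation, a hedged usage sketch of the factory itself (assuming `@promptbook/node` re-exports it; the option names come from the surrounding code, not a documented signature):

```ts
import { createCollectionFromDirectory } from '@promptbook/node';

// Node.js-only: walks the directory for .ptbk.md / .ptbk.json pipelines and
// prints a tip when no prebuilt collection JSON is found.
const collection = await createCollectionFromDirectory('./promptbook-collection', {
    isRecursive: true,
    isVerbose: true,
});
```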
@@ -6295,7 +6299,7 @@ var EnvironmentMismatchError = /** @class */ (function (_super) {
 * @public exported from `@promptbook/node`
 */
 function createLlmToolsFromConfigurationFromEnv() {
-if (!isRunningInNode()) {
+if (!$isRunningInNode()) {
 throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
 }
 dotenv.config();
@@ -6366,7 +6370,9 @@ var RemoteLlmExecutionTools = /** @class */ (function () {
 */
 RemoteLlmExecutionTools.prototype.makeConnection = function () {
 var _this = this;
-return new Promise(function (resolve, reject) {
+return new Promise(
+// <- TODO: [🧱] Implement in a functional (not new Class) way
+function (resolve, reject) {
 var socket = io(_this.options.remoteUrl, {
 path: _this.options.path,
 // path: `${this.remoteUrl.pathname}/socket.io`,
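This is the split the new [🧜‍♂️] TODO ("Maybe join remoteUrl and path into single value") refers to: the socket.io client takes the server URL and the handshake path as two separate options. The same call in isolation (URL and path values are made up):

```ts
import { io } from 'socket.io-client';

// `remoteUrl` and `path` travel separately today; the TODO proposes
// collapsing them into a single value.
const socket = io('https://example.com', { path: '/promptbook/socket.io' });
socket.on('connect', () => {
    console.log('connected as', socket.id);
});
```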
@@ -6488,12 +6494,21 @@ function computeUsage(value) {
 /**
 * List of available Anthropic Claude models with pricing
 *
-* Note: Done at 2024-05-25
+* Note: Done at 2024-08-16
 *
 * @see https://docs.anthropic.com/en/docs/models-overview
 * @public exported from `@promptbook/anthropic-claude`
 */
 var ANTHROPIC_CLAUDE_MODELS = [
+{
+modelVariant: 'CHAT',
+modelTitle: 'Claude 3.5 Sonnet',
+modelName: 'claude-3-5-sonnet-20240620',
+pricing: {
+prompt: computeUsage("$3.00 / 1M tokens"),
+output: computeUsage("$15.00 / 1M tokens"),
+},
+},
 {
 modelVariant: 'CHAT',
 modelTitle: 'Claude 3 Opus',
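Judging by its call sites, `computeUsage` parses a human-readable price string into a per-token price; a hedged sketch of that arithmetic (the function name and regex below are assumptions, not the library's code):

```ts
// Presumed meaning: "$3.00 / 1M tokens" is 3.00 USD per 1,000,000 tokens,
// i.e. 0.000003 USD per token.
function pricePerToken(priceString: string): number {
    const match = /^\$(\d+(?:\.\d+)?) \/ 1M tokens$/.exec(priceString);
    if (match === null) {
        throw new Error(`Unexpected price format: ${priceString}`);
    }
    return Number(match[1]) / 1_000_000;
}

console.log(pricePerToken('$3.00 / 1M tokens')); // 0.000003 (prompt)
console.log(pricePerToken('$15.00 / 1M tokens')); // 0.000015 (output)
```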
@@ -6555,9 +6570,18 @@
 * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
 * TODO: [🧠] Some mechanism to propagate unsureness
 * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
-* TODO: [🕚] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
+* TODO: [🎰] Some mechanism to auto-update available models
 */
 
+/**
+* Get current date in ISO 8601 format
+*
+* @private internal utility
+*/
+function getCurrentIsoDate() {
+return new Date().toISOString();
+}
+
 /**
 * Helper of usage compute
 *
@@ -6592,13 +6616,42 @@ function uncertainNumber(value) {
 }
 
 /**
-* Get current date in ISO 8601 format
+* Computes the usage of the Anthropic Claude API based on the response from Anthropic Claude
 *
-* @private internal utility
+* @param promptContent The content of the prompt
+* @param resultContent The content of the result (for embedding prompts or failed prompts pass empty string)
+* @param rawResponse The raw response from Anthropic Claude API
+* @throws {PipelineExecutionError} If the usage is not defined in the response from Anthropic Claude
+* @private internal utility of `AnthropicClaudeExecutionTools`
 */
-function getCurrentIsoDate() {
-return new Date().toISOString();
+function computeAnthropicClaudeUsage(promptContent, // <- Note: Intentionally using [] to access type properties to bring jsdoc from Prompt/PromptResult to consumer
+resultContent, rawResponse) {
+var _a, _b;
+if (rawResponse.usage === undefined) {
+throw new PipelineExecutionError('The usage is not defined in the response from Anthropic Claude');
+}
+if (((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.input_tokens) === undefined) {
+throw new PipelineExecutionError('In Anthropic Claude response `usage.prompt_tokens` not defined');
+}
+var inputTokens = rawResponse.usage.input_tokens;
+var outputTokens = ((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.output_tokens) || 0;
+var modelInfo = ANTHROPIC_CLAUDE_MODELS.find(function (model) { return model.modelName === rawResponse.model; });
+var price;
+if (modelInfo === undefined || modelInfo.pricing === undefined) {
+price = uncertainNumber();
+}
+else {
+price = uncertainNumber(inputTokens * modelInfo.pricing.prompt + outputTokens * modelInfo.pricing.output);
+}
+return {
+price: price,
+input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(promptContent)),
+output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
+};
 }
+/**
+* TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
+*/
 
 /**
 * Execution Tools for calling Anthropic Claude API.
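A hedged sketch of how the new helper is exercised; the raw response is mocked, but the field names (`model`, `usage.input_tokens`, `usage.output_tokens`) match the code above:

```ts
// Note: `computeAnthropicClaudeUsage` is @private, so this is illustrative
// rather than a public API call.
const usage = computeAnthropicClaudeUsage('What is a prompt?', 'A prompt is…', {
    model: 'claude-3-5-sonnet-20240620',
    usage: { input_tokens: 12, output_tokens: 345 },
});
// usage.price multiplies token counts by the ANTHROPIC_CLAUDE_MODELS pricing;
// for an unknown model it degrades to an uncertain number instead of throwing.
```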
@@ -6620,6 +6673,7 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
 delete anthropicOptions.isVerbose;
 delete anthropicOptions.isProxied;
 this.client = new Anthropic(anthropicOptions);
+// <- TODO: !!!!!! Lazy-load client
 }
 Object.defineProperty(AnthropicClaudeExecutionTools.prototype, "title", {
 get: function () {
@@ -6640,7 +6694,7 @@
 */
 AnthropicClaudeExecutionTools.prototype.callChatModel = function (prompt) {
 return __awaiter(this, void 0, void 0, function () {
-var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, resultContent, usage;
+var content, parameters, modelRequirements, modelName, rawPromptContent, rawRequest, start, complete, rawResponse, contentBlock, resultContent, usage;
 return __generator(this, function (_a) {
 switch (_a.label) {
 case 0:
@@ -6686,14 +6740,14 @@ var AnthropicClaudeExecutionTools = /** @class */ (function () {
 if (rawResponse.content.length > 1) {
 throw new PipelineExecutionError('More than one content blocks from Anthropic Claude');
 }
-resultContent = rawResponse.content[0].text;
+contentBlock = rawResponse.content[0];
+if (contentBlock.type !== 'text') {
+throw new PipelineExecutionError("Returned content is not \"text\" type but \"".concat(contentBlock.type, "\""));
+}
+resultContent = contentBlock.text;
 // eslint-disable-next-line prefer-const
 complete = getCurrentIsoDate();
-usage = {
-price: { value: 0, isUncertain: true } /* <- TODO: [🐞] Compute usage */,
-input: __assign({ tokensCount: uncertainNumber(rawResponse.usage.input_tokens) }, computeUsageCounts(prompt.content)),
-output: __assign({ tokensCount: uncertainNumber(rawResponse.usage.output_tokens) }, computeUsageCounts(prompt.content)),
-};
+usage = computeAnthropicClaudeUsage(content, '', rawResponse);
 return [2 /*return*/, {
 content: resultContent,
 modelName: rawResponse.model,
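The new guard exists because the Anthropic SDK types `content` as a union of block kinds, so `.text` is only safe after narrowing; the same check in isolation (assuming the `@anthropic-ai/sdk` types of this era):

```ts
import type Anthropic from '@anthropic-ai/sdk';

// Narrow the content-block union (text blocks, tool-use blocks, …) before
// reading `.text`.
function pickTextBlock(block: Anthropic.Messages.ContentBlock): string {
    if (block.type !== 'text') {
        throw new Error(`Returned content is not "text" type but "${block.type}"`);
    }
    return block.text;
}
```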
@@ -6841,7 +6895,9 @@ function createAnthropicClaudeExecutionTools(options) {
 },
 ], models: ANTHROPIC_CLAUDE_MODELS }));
 }
-return new AnthropicClaudeExecutionTools(options);
+return new AnthropicClaudeExecutionTools(
+// <- TODO: [🧱] Implement in a functional (not new Class) way
+options);
 }
 /**
 * TODO: [🧠] !!!! Make anonymous this with all LLM providers
@@ -7190,7 +7246,8 @@ var OPENAI_MODELS = [
 /**
 * Note: [🤖] Add models of new variant
 * TODO: [🧠] Some mechanism to propagate unsureness
-* TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
+* TODO: [🎰] Some mechanism to auto-update available models
+* TODO: [🎰][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
 * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths ( Top-level performance, intelligence, fluency, and understanding), contextWindow,...
 * @see https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4
 * @see https://openai.com/api/pricing/
@@ -7213,7 +7270,10 @@ var AzureOpenAiExecutionTools = /** @class */ (function () {
 */
 function AzureOpenAiExecutionTools(options) {
 this.options = options;
-this.client = new OpenAIClient("https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
+this.client = new OpenAIClient(
+// <- TODO: [🧱] Implement in a functional (not new Class) way
+"https://".concat(options.resourceName, ".openai.azure.com/"), new AzureKeyCredential(options.apiKey));
+// <- TODO: !!!!!! Lazy-load client
 }
 Object.defineProperty(AzureOpenAiExecutionTools.prototype, "title", {
 get: function () {
@@ -7483,6 +7543,9 @@ resultContent, rawResponse) {
 output: __assign({ tokensCount: uncertainNumber(outputTokens) }, computeUsageCounts(resultContent)),
 };
 }
+/**
+* TODO: [🤝] DRY Maybe some common abstraction between `computeOpenaiUsage` and `computeAnthropicClaudeUsage`
+*/
 
 /**
 * Execution Tools for calling OpenAI API.
@@ -7503,6 +7566,7 @@ var OpenAiExecutionTools = /** @class */ (function () {
 delete openAiOptions.isVerbose;
 delete openAiOptions.user;
 this.client = new OpenAI(__assign({}, openAiOptions));
+// <- TODO: !!!!!! Lazy-load client
 }
 Object.defineProperty(OpenAiExecutionTools.prototype, "title", {
 get: function () {
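The same `!!!!!! Lazy-load client` TODO now appears in the Anthropic, Azure and OpenAI constructors; one way that could look (an assumption about the direction, not code from this release):

```ts
import OpenAI from 'openai';

// Sketch: construct the client on first use instead of in the constructor,
// so creating the tools object has no side effects by itself.
class LazyClientHolder {
    private client: OpenAI | undefined;

    public constructor(private readonly options: { apiKey: string }) {}

    public getClient(): OpenAI {
        if (this.client === undefined) {
            this.client = new OpenAI({ apiKey: this.options.apiKey });
        }
        return this.client;
    }
}
```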
@@ -7796,11 +7860,15 @@ var EXECUTION_TOOLS_CLASSES = {
 return new OpenAiExecutionTools(__assign(__assign({}, options), { dangerouslyAllowBrowser: true /* <- TODO: [🧠] !!! Some mechanism for auto-detection of browser, maybe hide in `OpenAiExecutionTools` */ }));
 },
 createAnthropicClaudeExecutionTools: createAnthropicClaudeExecutionTools,
-createAzureOpenAiExecutionTools: function (options) { return new AzureOpenAiExecutionTools(options); },
+createAzureOpenAiExecutionTools: function (options) {
+return new AzureOpenAiExecutionTools(
+// <- TODO: [🧱] Implement in a functional (not new Class) way
+options);
+},
 // <- Note: [🦑] Add here new LLM provider
 };
 /**
-* TODO: [🧠] Better file name than `config.ts` + maybe move to two separate files
+* TODO: !!!!!!! Make global register for this
 * TODO: [🧠][🎌] Adding this should be responsibility of each provider package NOT this one central place
 */
 
@@ -7845,7 +7913,7 @@
 */
 function createLlmToolsFromEnv(options) {
 if (options === void 0) { options = {}; }
-if (!isRunningInNode()) {
+if (!$isRunningInNode()) {
 throw new EnvironmentMismatchError('Function `createLlmToolsFromEnv` works only in Node.js environment');
 }
 var configuration = createLlmToolsFromConfigurationFromEnv();
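A hedged usage sketch of this Node-only factory (assuming the `@promptbook/node` export point; the environment variable name is an example, not from this diff):

```ts
import { createLlmToolsFromEnv } from '@promptbook/node';

// Node.js-only: loads `.env` via dotenv, builds a configuration from the
// provider API keys it finds (e.g. OPENAI_API_KEY), and returns execution
// tools covering all configured providers.
const llmTools = createLlmToolsFromEnv();
```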
@@ -7905,7 +7973,7 @@ function nameToSubfolderPath(name) {
 var FilesStorage = /** @class */ (function () {
 function FilesStorage(options) {
 this.options = options;
-if (!isRunningInNode()) {
+if (!$isRunningInNode()) {
 throw new EnvironmentMismatchError("FilesStorage works only in Node.js environment");
 }
 }
@@ -7928,7 +7996,7 @@ var FilesStorage = /** @class */ (function () {
 switch (_a.label) {
 case 0:
 filename = this.getFilenameForKey(key);
-return [4 /*yield*/, isFileExisting(filename)];
+return [4 /*yield*/, $isFileExisting(filename)];
 case 1:
 if (!(_a.sent())) {
 return [2 /*return*/, null];