@promptbook/cli 0.66.0 → 0.67.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/esm/index.es.js +486 -306
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/src/_packages/core.index.d.ts +4 -4
  4. package/esm/typings/src/_packages/types.index.d.ts +7 -1
  5. package/esm/typings/src/_packages/utils.index.d.ts +14 -8
  6. package/esm/typings/src/commands/EXPECT/ExpectFormatCommand.d.ts +2 -0
  7. package/esm/typings/src/errors/{ReferenceError.d.ts → PipelineUrlError.d.ts} +2 -2
  8. package/esm/typings/src/errors/index.d.ts +27 -0
  9. package/esm/typings/src/errors/utils/ErrorJson.d.ts +20 -0
  10. package/esm/typings/src/errors/utils/deserializeError.d.ts +7 -0
  11. package/esm/typings/src/errors/utils/deserializeError.test.d.ts +1 -0
  12. package/esm/typings/src/errors/utils/serializeError.d.ts +7 -0
  13. package/esm/typings/src/errors/utils/serializeError.test.d.ts +1 -0
  14. package/esm/typings/src/execution/ExecutionTools.d.ts +4 -1
  15. package/esm/typings/src/execution/PipelineExecutor.d.ts +1 -47
  16. package/esm/typings/src/execution/PipelineExecutorResult.d.ts +49 -0
  17. package/esm/typings/src/execution/PromptResult.d.ts +4 -4
  18. package/esm/typings/src/execution/PromptResultUsage.d.ts +4 -0
  19. package/esm/typings/src/execution/UncertainNumber.d.ts +1 -0
  20. package/esm/typings/src/execution/assertsExecutionSuccessful.d.ts +2 -2
  21. package/esm/typings/src/llm-providers/_common/utils/cache/CacheItem.d.ts +0 -1
  22. package/esm/typings/src/llm-providers/mocked/$fakeTextToExpectations.d.ts +2 -2
  23. package/esm/typings/src/llm-providers/mocked/MockedFackedLlmExecutionTools.d.ts +3 -3
  24. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  25. package/esm/typings/src/llm-providers/remote/interfaces/PromptbookServer_Error.d.ts +2 -6
  26. package/esm/typings/src/llm-providers/remote/startRemoteServer.d.ts +1 -0
  27. package/esm/typings/src/personas/preparePersona.d.ts +1 -0
  28. package/esm/typings/src/prepare/isPipelinePrepared.d.ts +1 -0
  29. package/esm/typings/src/scripting/javascript/JavascriptExecutionToolsOptions.d.ts +2 -2
  30. package/esm/typings/src/storage/_common/PromptbookStorage.d.ts +1 -1
  31. package/esm/typings/src/types/ModelRequirements.d.ts +5 -5
  32. package/esm/typings/src/types/PipelineJson/Expectations.d.ts +3 -1
  33. package/esm/typings/src/types/PipelineJson/KnowledgePieceJson.d.ts +2 -0
  34. package/esm/typings/src/types/PipelineJson/KnowledgeSourceJson.d.ts +4 -0
  35. package/esm/typings/src/types/PipelineJson/LlmTemplateJson.d.ts +2 -0
  36. package/esm/typings/src/types/PipelineJson/PersonaJson.d.ts +4 -0
  37. package/esm/typings/src/types/PipelineJson/PipelineJson.d.ts +2 -0
  38. package/esm/typings/src/types/PipelineJson/PromptDialogJson.d.ts +1 -0
  39. package/esm/typings/src/types/PipelineJson/PromptTemplateJson.d.ts +2 -0
  40. package/esm/typings/src/types/PipelineJson/PromptTemplateJsonCommon.d.ts +2 -2
  41. package/esm/typings/src/types/PipelineJson/PromptTemplateParameterJson.d.ts +2 -0
  42. package/esm/typings/src/types/PipelineJson/ScriptJson.d.ts +1 -0
  43. package/esm/typings/src/types/PipelineJson/SimpleTemplateJson.d.ts +1 -0
  44. package/esm/typings/src/types/Prompt.d.ts +7 -7
  45. package/esm/typings/src/types/ScriptLanguage.d.ts +2 -0
  46. package/esm/typings/src/types/execution-report/ExecutionPromptReportJson.d.ts +24 -0
  47. package/esm/typings/src/types/execution-report/ExecutionReportJson.d.ts +3 -20
  48. package/esm/typings/src/types/typeAliases.d.ts +7 -0
  49. package/esm/typings/src/utils/environment/$getGlobalScope.d.ts +1 -4
  50. package/esm/typings/src/utils/serialization/$asDeeplyFrozenSerializableJson.d.ts +17 -0
  51. package/esm/typings/src/utils/{deepFreeze.d.ts → serialization/$deepFreeze.d.ts} +0 -10
  52. package/esm/typings/src/utils/serialization/checkSerializableAsJson.d.ts +27 -0
  53. package/esm/typings/src/utils/{clonePipeline.d.ts → serialization/clonePipeline.d.ts} +1 -1
  54. package/esm/typings/src/utils/serialization/isSerializableAsJson.d.ts +24 -0
  55. package/esm/typings/src/utils/serialization/isSerializableAsJson.test.d.ts +1 -0
  56. package/package.json +1 -1
  57. package/umd/index.umd.js +406 -226
  58. package/umd/index.umd.js.map +1 -1
  59. package/esm/typings/src/errors/VersionMismatchError.d.ts +0 -10
  60. /package/esm/typings/src/utils/{deepClone.d.ts → serialization/deepClone.d.ts} +0 -0
package/umd/index.umd.js CHANGED
@@ -39,7 +39,7 @@
39
39
  /**
40
40
  * The version of the Promptbook library
41
41
  */
42
- var PROMPTBOOK_VERSION = '0.66.0-9';
42
+ var PROMPTBOOK_VERSION = '0.67.0-0';
43
43
  // TODO: !!!! List here all the versions and annotate + put into script
44
44
 
45
45
  /*! *****************************************************************************
@@ -186,6 +186,26 @@
186
186
  */
187
187
  var $isRunningInNode = new Function("\n try {\n return this === global;\n } catch (e) {\n return false;\n }\n");
188
188
 
189
+ /**
190
+ * Returns the same value that is passed as argument.
191
+ * No side effects.
192
+ *
193
+ * Note: It can be usefull for:
194
+ *
195
+ * 1) Leveling indentation
196
+ * 2) Putting always-true or always-false conditions without getting eslint errors
197
+ *
198
+ * @param value any values
199
+ * @returns the same values
200
+ * @private within the repository
201
+ */
202
+ function just(value) {
203
+ if (value === undefined) {
204
+ return undefined;
205
+ }
206
+ return value;
207
+ }
208
+
189
209
  /**
190
210
  * @@@
191
211
  *
@@ -216,42 +236,169 @@
216
236
  }
217
237
  return Object.freeze(objectValue);
218
238
  }
239
+ /**
240
+ * TODO: [🧠] Is there a way how to meaningfully test this utility
241
+ */
242
+
243
+ /**
244
+ * This error type indicates that the error should not happen and its last check before crashing with some other error
245
+ *
246
+ * @public exported from `@promptbook/core`
247
+ */
248
+ var UnexpectedError = /** @class */ (function (_super) {
249
+ __extends(UnexpectedError, _super);
250
+ function UnexpectedError(message) {
251
+ var _this = _super.call(this, spaceTrim.spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This error should not happen.\n It's probbably a bug in the pipeline collection\n\n Please report issue:\n https://github.com/webgptorg/promptbook/issues\n\n Or contact us on me@pavolhejny.com\n\n "); })) || this;
252
+ _this.name = 'UnexpectedError';
253
+ Object.setPrototypeOf(_this, UnexpectedError.prototype);
254
+ return _this;
255
+ }
256
+ return UnexpectedError;
257
+ }(Error));
258
+
259
+ /**
260
+ * Checks if the value is [🚉] serializable as JSON
261
+ * If not, throws an UnexpectedError with a rich error message and tracking
262
+ *
263
+ * - Almost all primitives are serializable BUT:
264
+ * - `undefined` is not serializable
265
+ * - `NaN` is not serializable
266
+ * - Objects and arrays are serializable if all their properties are serializable
267
+ * - Functions are not serializable
268
+ * - Circular references are not serializable
269
+ * - `Date` objects are not serializable
270
+ * - `Map` and `Set` objects are not serializable
271
+ * - `RegExp` objects are not serializable
272
+ * - `Error` objects are not serializable
273
+ * - `Symbol` objects are not serializable
274
+ * - And much more...
275
+ *
276
+ * @throws UnexpectedError if the value is not serializable as JSON
277
+ * @public exported from `@promptbook/utils`
278
+ */
279
+ function checkSerializableAsJson(name, value) {
280
+ var e_1, _a;
281
+ if (value === undefined) {
282
+ throw new UnexpectedError("".concat(name, " is undefined"));
283
+ }
284
+ else if (value === null) {
285
+ return;
286
+ }
287
+ else if (typeof value === 'boolean') {
288
+ return;
289
+ }
290
+ else if (typeof value === 'number' && !isNaN(value)) {
291
+ return;
292
+ }
293
+ else if (typeof value === 'string') {
294
+ return;
295
+ }
296
+ else if (typeof value === 'symbol') {
297
+ throw new UnexpectedError("".concat(name, " is symbol"));
298
+ }
299
+ else if (typeof value === 'function') {
300
+ throw new UnexpectedError("".concat(name, " is function"));
301
+ }
302
+ else if (typeof value === 'object' && Array.isArray(value)) {
303
+ for (var i = 0; i < value.length; i++) {
304
+ checkSerializableAsJson("".concat(name, "[").concat(i, "]"), value[i]);
305
+ }
306
+ }
307
+ else if (typeof value === 'object') {
308
+ if (value instanceof Date) {
309
+ throw new UnexpectedError(spaceTrim__default["default"]("\n ".concat(name, " is Date\n\n Use `string_date_iso8601` instead\n ")));
310
+ }
311
+ else if (value instanceof Map) {
312
+ throw new UnexpectedError("".concat(name, " is Map"));
313
+ }
314
+ else if (value instanceof Set) {
315
+ throw new UnexpectedError("".concat(name, " is Set"));
316
+ }
317
+ else if (value instanceof RegExp) {
318
+ throw new UnexpectedError("".concat(name, " is RegExp"));
319
+ }
320
+ else if (value instanceof Error) {
321
+ throw new UnexpectedError(spaceTrim__default["default"]("\n ".concat(name, " is unserialized Error\n\n Use function `serializeError`\n ")));
322
+ }
323
+ else {
324
+ try {
325
+ for (var _b = __values(Object.entries(value)), _c = _b.next(); !_c.done; _c = _b.next()) {
326
+ var _d = __read(_c.value, 2), subName = _d[0], subValue = _d[1];
327
+ if (subValue === undefined) {
328
+ // Note: undefined in object is serializable - it is just omited
329
+ continue;
330
+ }
331
+ checkSerializableAsJson("".concat(name, ".").concat(subName), subValue);
332
+ }
333
+ }
334
+ catch (e_1_1) { e_1 = { error: e_1_1 }; }
335
+ finally {
336
+ try {
337
+ if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
338
+ }
339
+ finally { if (e_1) throw e_1.error; }
340
+ }
341
+ try {
342
+ JSON.stringify(value); // <- TODO: [0]
343
+ }
344
+ catch (error) {
345
+ if (!(error instanceof Error)) {
346
+ throw error;
347
+ }
348
+ throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n ".concat(name, " is not serializable\n\n ").concat(block(error.toString()), "\n "); }));
349
+ }
350
+ /*
351
+ TODO: [0] Is there some more elegant way to check circular references?
352
+ const seen = new Set();
353
+ const stack = [{ value }];
354
+ while (stack.length > 0) {
355
+ const { value } = stack.pop()!;
356
+ if (typeof value === 'object' && value !== null) {
357
+ if (seen.has(value)) {
358
+ throw new UnexpectedError(`${name} has circular reference`);
359
+ }
360
+ seen.add(value);
361
+ if (Array.isArray(value)) {
362
+ stack.push(...value.map((value) => ({ value })));
363
+ } else {
364
+ stack.push(...Object.values(value).map((value) => ({ value })));
365
+ }
366
+ }
367
+ }
368
+ */
369
+ return;
370
+ }
371
+ }
372
+ else {
373
+ throw new UnexpectedError("".concat(name, " is unknown"));
374
+ }
375
+ }
376
+ /**
377
+ * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
378
+ * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
379
+ * Note: [🐠] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just retun true/false or rich error message
380
+ */
381
+
219
382
  /**
220
383
  * @@@
221
384
  * @@@
222
385
  *
223
386
  * Note: This function mutates the object and returns the original (but mutated-deep-freezed) object
224
387
  *
388
+ * @param name - Name of the object for debugging purposes
389
+ * @param objectValue - Object to be deeply frozen
225
390
  * @returns The same object as the input, but deeply frozen
226
391
  * @private this is in comparison to `deepFreeze` a more specific utility and maybe not very good practice to use without specific reason and considerations
227
392
  */
228
- function deepFreezeWithSameType(objectValue) {
393
+ function $asDeeplyFrozenSerializableJson(name, objectValue) {
394
+ checkSerializableAsJson(name, objectValue);
229
395
  return $deepFreeze(objectValue);
230
396
  }
231
397
  /**
398
+ * TODO: [🧠][🛣] More elegant way to tracking than passing `name`
232
399
  * TODO: [🧠] Is there a way how to meaningfully test this utility
233
400
  */
234
401
 
235
- /**
236
- * Returns the same value that is passed as argument.
237
- * No side effects.
238
- *
239
- * Note: It can be usefull for:
240
- *
241
- * 1) Leveling indentation
242
- * 2) Putting always-true or always-false conditions without getting eslint errors
243
- *
244
- * @param value any values
245
- * @returns the same values
246
- * @private within the repository
247
- */
248
- function just(value) {
249
- if (value === undefined) {
250
- return undefined;
251
- }
252
- return value;
253
- }
254
-
255
402
  /**
256
403
  * Warning message for the generated sections and files files
257
404
  *
@@ -320,7 +467,7 @@
320
467
  *
321
468
  * @public exported from `@promptbook/core`
322
469
  */
323
- var RESERVED_PARAMETER_NAMES = $deepFreeze([
470
+ var RESERVED_PARAMETER_NAMES = $asDeeplyFrozenSerializableJson('RESERVED_PARAMETER_NAMES', [
324
471
  'content',
325
472
  'context',
326
473
  'knowledge',
@@ -893,7 +1040,7 @@
893
1040
  });
894
1041
  }
895
1042
 
896
- var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.66.0-9",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
1043
+ var PipelineCollection = [{title:"Prepare Knowledge from Markdown",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-from-markdown.ptbk.md",promptbookVersion:"0.67.0-0",parameters:[{name:"knowledgeContent",description:"Markdown document content",isInput:true,isOutput:false},{name:"knowledgePieces",description:"The knowledge JSON object",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, extract the important knowledge from the document.\n\n# Rules\n\n- Make pieces of information concise, clear, and easy to understand\n- One piece of information should be approximately 1 paragraph\n- Divide the paragraphs by markdown horizontal lines ---\n- Omit irrelevant information\n- Group redundant information\n- Write just extracted information, nothing else\n\n# The document\n\nTake information from this document:\n\n> {knowledgeContent}",dependentParameterNames:["knowledgeContent"],resultingParameterName:"knowledgePieces"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-from-markdown.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-keywords.ptbk.md",promptbookVersion:"0.67.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"keywords",description:"Keywords separated by comma",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced data researcher, detect the important keywords in the document.\n\n# Rules\n\n- Write just keywords separated by comma\n\n# The document\n\nTake information from this document:\n\n> 
{knowledgePieceContent}",dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"keywords"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-keywords.ptbk.md"},{title:"Prepare Title",pipelineUrl:"https://promptbook.studio/promptbook/prepare-knowledge-title.ptbk.md",promptbookVersion:"0.67.0-0",parameters:[{name:"knowledgePieceContent",description:"The content",isInput:true,isOutput:false},{name:"title",description:"The title of the document",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"knowledge",title:"Knowledge",modelRequirements:{modelVariant:"CHAT",modelName:"claude-3-opus-20240229"},content:"You are experienced content creator, write best title for the document.\n\n# Rules\n\n- Write just title, nothing else\n- Title should be concise and clear\n- Write maximum 5 words for the title\n\n# The document\n\n> {knowledgePieceContent}",expectations:{words:{min:1,max:8}},dependentParameterNames:["knowledgePieceContent"],resultingParameterName:"title"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-knowledge-title.ptbk.md"},{title:"Prepare Keywords",pipelineUrl:"https://promptbook.studio/promptbook/prepare-persona.ptbk.md",promptbookVersion:"0.67.0-0",parameters:[{name:"availableModelNames",description:"List of available model names separated by comma (,)",isInput:true,isOutput:false},{name:"personaDescription",description:"Description of the persona",isInput:true,isOutput:false},{name:"modelRequirements",description:"Specific requirements for the model",isInput:false,isOutput:true}],promptTemplates:[{blockType:"PROMPT_TEMPLATE",name:"make-model-requirements",title:"Make modelRequirements",modelRequirements:{modelVariant:"CHAT",modelName:"gpt-4-turbo"},content:"You are experienced AI engineer, you need to create virtual assistant.\nWrite\n\n## 
Sample\n\n```json\n{\n\"modelName\": \"gpt-4o\",\n\"systemMessage\": \"You are experienced AI engineer and helpfull assistant.\",\n\"temperature\": 0.7\n}\n```\n\n## Instructions\n\n### Option `modelName`\n\nPick from the following models:\n\n- {availableModelNames}\n\n### Option `systemMessage`\n\nThe system message is used to communicate instructions or provide context to the model at the beginning of a conversation. It is displayed in a different format compared to user messages, helping the model understand its role in the conversation. The system message typically guides the model's behavior, sets the tone, or specifies desired output from the model. By utilizing the system message effectively, users can steer the model towards generating more accurate and relevant responses.\n\nFor example:\n\n> You are an experienced AI engineer and helpful assistant.\n\n> You are a friendly and knowledgeable chatbot.\n\n### Option `temperature`\n\nThe sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.\n\nYou can pick a value between 0 and 2. 
For example:\n\n- `0.1`: Low temperature, extremely conservative and deterministic\n- `0.5`: Medium temperature, balanced between conservative and creative\n- `1.0`: High temperature, creative and bit random\n- `1.5`: Very high temperature, extremely creative and often chaotic and unpredictable\n- `2.0`: Maximum temperature, completely random and unpredictable, for some extreme creative use cases\n\n# The assistant\n\nTake this description of the persona:\n\n> {personaDescription}",expectFormat:"JSON",dependentParameterNames:["availableModelNames","personaDescription"],resultingParameterName:"modelRequirements"}],knowledgeSources:[],knowledgePieces:[],personas:[],preparations:[],sourceFile:"./promptbook-collection/prepare-persona.ptbk.md"}];
897
1044
 
898
1045
  /**
899
1046
  * This error indicates that the promptbook in a markdown format cannot be parsed into a valid promptbook object
@@ -927,22 +1074,6 @@
927
1074
  return PipelineLogicError;
928
1075
  }(Error));
929
1076
 
930
- /**
931
- * This error type indicates that the error should not happen and its last check before crashing with some other error
932
- *
933
- * @public exported from `@promptbook/core`
934
- */
935
- var UnexpectedError = /** @class */ (function (_super) {
936
- __extends(UnexpectedError, _super);
937
- function UnexpectedError(message) {
938
- var _this = _super.call(this, spaceTrim.spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This error should not happen.\n It's probbably a bug in the pipeline collection\n\n Please report issue:\n https://github.com/webgptorg/promptbook/issues\n\n Or contact us on me@pavolhejny.com\n\n "); })) || this;
939
- _this.name = 'UnexpectedError';
940
- Object.setPrototypeOf(_this, UnexpectedError.prototype);
941
- return _this;
942
- }
943
- return UnexpectedError;
944
- }(Error));
945
-
946
1077
  /**
947
1078
  * Tests if given string is valid semantic version
948
1079
  *
@@ -1356,15 +1487,15 @@
1356
1487
  *
1357
1488
  * @public exported from `@promptbook/core`
1358
1489
  */
1359
- var ReferenceError$1 = /** @class */ (function (_super) {
1360
- __extends(ReferenceError, _super);
1361
- function ReferenceError(message) {
1490
+ var PipelineUrlError = /** @class */ (function (_super) {
1491
+ __extends(PipelineUrlError, _super);
1492
+ function PipelineUrlError(message) {
1362
1493
  var _this = _super.call(this, message) || this;
1363
- _this.name = 'ReferenceError';
1364
- Object.setPrototypeOf(_this, ReferenceError.prototype);
1494
+ _this.name = 'PipelineUrlError';
1495
+ Object.setPrototypeOf(_this, PipelineUrlError.prototype);
1365
1496
  return _this;
1366
1497
  }
1367
- return ReferenceError;
1498
+ return PipelineUrlError;
1368
1499
  }(Error));
1369
1500
 
1370
1501
  /**
@@ -1412,7 +1543,7 @@
1412
1543
  delete promptTemplateUnprepared.preparedContent;
1413
1544
  return promptTemplateUnprepared;
1414
1545
  });
1415
- return __assign(__assign({}, pipeline), { promptTemplates: promptTemplates, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] });
1546
+ return $asDeeplyFrozenSerializableJson('Unprepared PipelineJson', __assign(__assign({}, pipeline), { promptTemplates: promptTemplates, knowledgeSources: knowledgeSources, knowledgePieces: [], personas: personas, preparations: [] }));
1416
1547
  }
1417
1548
  /**
1418
1549
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -1448,7 +1579,7 @@
1448
1579
  var pipeline = pipelines_1_1.value;
1449
1580
  // TODO: [👠] DRY
1450
1581
  if (pipeline.pipelineUrl === undefined) {
1451
- throw new ReferenceError$1(spaceTrim.spaceTrim("\n Pipeline with name \"".concat(pipeline.title, "\" does not have defined URL\n\n File:\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines without URLs are called anonymous pipelines\n They can be used as standalone pipelines, but they cannot be referenced by other pipelines\n And also they cannot be used in the pipeline collection\n\n ")));
1582
+ throw new PipelineUrlError(spaceTrim.spaceTrim("\n Pipeline with name \"".concat(pipeline.title, "\" does not have defined URL\n\n File:\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: Pipelines without URLs are called anonymous pipelines\n They can be used as standalone pipelines, but they cannot be referenced by other pipelines\n And also they cannot be used in the pipeline collection\n\n ")));
1452
1583
  }
1453
1584
  // Note: [🐨]
1454
1585
  validatePipeline(pipeline);
@@ -1460,7 +1591,7 @@
1460
1591
  pipelineJsonToString(unpreparePipeline(pipeline)) !==
1461
1592
  pipelineJsonToString(unpreparePipeline(this.collection.get(pipeline.pipelineUrl)))) {
1462
1593
  var existing = this.collection.get(pipeline.pipelineUrl);
1463
- throw new ReferenceError$1(spaceTrim.spaceTrim("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4E\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
1594
+ throw new PipelineUrlError(spaceTrim.spaceTrim("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4E\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
1464
1595
  }
1465
1596
  // Note: [🧠] Overwrite existing pipeline with the same URL
1466
1597
  this.collection.set(pipeline.pipelineUrl, pipeline);
@@ -1905,9 +2036,89 @@
1905
2036
  }(Error));
1906
2037
 
1907
2038
  /**
1908
- * Asserts that the execution of a promptnook is successful
2039
+ * This error indicates that the pipeline collection cannot be propperly loaded
2040
+ *
2041
+ * @public exported from `@promptbook/core`
2042
+ */
2043
+ var CollectionError = /** @class */ (function (_super) {
2044
+ __extends(CollectionError, _super);
2045
+ function CollectionError(message) {
2046
+ var _this = _super.call(this, message) || this;
2047
+ _this.name = 'CollectionError';
2048
+ Object.setPrototypeOf(_this, CollectionError.prototype);
2049
+ return _this;
2050
+ }
2051
+ return CollectionError;
2052
+ }(Error));
2053
+
2054
+ /**
2055
+ * This error type indicates that some limit was reached
2056
+ *
2057
+ * @public exported from `@promptbook/core`
2058
+ */
2059
+ var LimitReachedError = /** @class */ (function (_super) {
2060
+ __extends(LimitReachedError, _super);
2061
+ function LimitReachedError(message) {
2062
+ var _this = _super.call(this, message) || this;
2063
+ _this.name = 'LimitReachedError';
2064
+ Object.setPrototypeOf(_this, LimitReachedError.prototype);
2065
+ return _this;
2066
+ }
2067
+ return LimitReachedError;
2068
+ }(Error));
2069
+
2070
+ /**
2071
+ * This error type indicates that some part of the code is not implemented yet
2072
+ *
2073
+ * @public exported from `@promptbook/core`
2074
+ */
2075
+ var NotYetImplementedError = /** @class */ (function (_super) {
2076
+ __extends(NotYetImplementedError, _super);
2077
+ function NotYetImplementedError(message) {
2078
+ var _this = _super.call(this, spaceTrim.spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This feature is not implemented yet but it will be soon.\n\n If you want speed up the implementation or just read more, look here:\n https://github.com/webgptorg/promptbook\n\n Or contact us on me@pavolhejny.com\n\n "); })) || this;
2079
+ _this.name = 'NotYetImplementedError';
2080
+ Object.setPrototypeOf(_this, NotYetImplementedError.prototype);
2081
+ return _this;
2082
+ }
2083
+ return NotYetImplementedError;
2084
+ }(Error));
2085
+
2086
+ /**
2087
+ * Index of all custom errors
2088
+ *
2089
+ * @public exported from `@promptbook/core`
2090
+ */
2091
+ var ERRORS = {
2092
+ CollectionError: CollectionError,
2093
+ EnvironmentMismatchError: EnvironmentMismatchError,
2094
+ LimitReachedError: LimitReachedError,
2095
+ NotFoundError: NotFoundError,
2096
+ NotYetImplementedError: NotYetImplementedError,
2097
+ ParsingError: ParsingError,
2098
+ PipelineExecutionError: PipelineExecutionError,
2099
+ PipelineLogicError: PipelineLogicError,
2100
+ PipelineUrlError: PipelineUrlError,
2101
+ UnexpectedError: UnexpectedError,
2102
+ // TODO: [🪑]> VersionMismatchError,
2103
+ };
2104
+
2105
+ /**
2106
+ * Deserializes the error object
2107
+ *
2108
+ * @public exported from `@promptbook/utils`
2109
+ */
2110
+ function deserializeError(error) {
2111
+ if (error.name === 'Error') {
2112
+ return new Error(error.message);
2113
+ }
2114
+ var CustomError = ERRORS[error.name];
2115
+ return new CustomError(error.message);
2116
+ }
2117
+
2118
+ /**
2119
+ * Asserts that the execution of a Promptbook is successful
1909
2120
  *
1910
- * @param executionResult - The partial result of the promptnook execution
2121
+ * @param executionResult - The partial result of the Promptbook execution
1911
2122
  * @throws {PipelineExecutionError} If the execution is not successful or if multiple errors occurred
1912
2123
  * @public exported from `@promptbook/core`
1913
2124
  */
@@ -1917,15 +2128,16 @@
1917
2128
  return;
1918
2129
  }
1919
2130
  if (errors.length === 0) {
1920
- throw new PipelineExecutionError("Promptnook Execution failed because of unknown reason");
2131
+ throw new PipelineExecutionError("Promptbook Execution failed because of unknown reason");
1921
2132
  }
1922
2133
  else if (errors.length === 1) {
1923
- throw errors[0];
2134
+ throw deserializeError(errors[0]);
1924
2135
  }
1925
2136
  else {
1926
- throw new PipelineExecutionError(spaceTrim.spaceTrim(function (block) { return "\n Multiple errors occurred during promptnook execution\n\n ".concat(block(errors
1927
- .map(function (error, index) {
1928
- return spaceTrim.spaceTrim(function (block) { return "\n Error ".concat(index + 1, ":\n ").concat(block(error.stack || error.message), "\n "); });
2137
+ throw new PipelineExecutionError(spaceTrim.spaceTrim(function (block) { return "\n Multiple errors occurred during Promptbook execution\n\n ".concat(block(errors
2138
+ .map(function (_a, index) {
2139
+ var name = _a.name, stack = _a.stack, message = _a.message;
2140
+ return spaceTrim.spaceTrim(function (block) { return "\n ".concat(name, " ").concat(index + 1, ":\n ").concat(block(stack || message), "\n "); });
1929
2141
  })
1930
2142
  .join('\n')), "\n "); }));
1931
2143
  }
@@ -1957,7 +2169,7 @@
1957
2169
  var undefinedName = error.message.split(' ')[0];
1958
2170
  /*
1959
2171
  Note: Parsing the error
1960
- [ReferenceError: thing is not defined]
2172
+ [PipelineUrlError: thing is not defined]
1961
2173
  */
1962
2174
  if (!undefinedName) {
1963
2175
  throw error;
@@ -1975,7 +2187,7 @@
1975
2187
  if (!(error instanceof Error)) {
1976
2188
  throw error;
1977
2189
  }
1978
- throw new ParsingError(spaceTrim.spaceTrim(function (block) { return "\n Can not extract variables from the script\n\n ".concat(block(error.name), ": ").concat(block(error.message), "\n "); }));
2190
+ throw new ParsingError(spaceTrim.spaceTrim(function (block) { return "\n Can not extract variables from the script\n\n ".concat(block(error.toString()), "}\n "); }));
1979
2191
  }
1980
2192
  return variables;
1981
2193
  }
@@ -2062,6 +2274,23 @@
2062
2274
  return ExpectError;
2063
2275
  }(Error));
2064
2276
 
2277
+ /**
2278
+ * Serializes an error into a [🚉] JSON-serializable object
2279
+ *
2280
+ * @public exported from `@promptbook/utils`
2281
+ */
2282
+ function serializeError(error) {
2283
+ var name = error.name, message = error.message, stack = error.stack;
2284
+ if (!__spreadArray(['Error'], __read(Object.keys(ERRORS)), false).includes(name)) {
2285
+ throw new UnexpectedError(spaceTrim__default["default"](function (block) { return "\n \n Cannot serialize error with name \"".concat(name, "\"\n\n ").concat(block(stack || message), "\n \n "); }));
2286
+ }
2287
+ return {
2288
+ name: name,
2289
+ message: message,
2290
+ stack: stack,
2291
+ };
2292
+ }
2293
+
2065
2294
  /**
2066
2295
  * Function isValidJsonString will tell you if the string is valid JSON or not
2067
2296
  *
@@ -2399,6 +2628,7 @@
2399
2628
  return true;
2400
2629
  }
2401
2630
  /**
2631
+ * TODO: [🔃] !!!!! If the pipeline was prepared with different version or different set of models, prepare it once again
2402
2632
  * TODO: [🐠] Maybe base this on `makeValidator`
2403
2633
  * TODO: [🧊] Pipeline can be partially prepared, this should return true ONLY if fully prepared
2404
2634
  * TODO: [🧿] Maybe do same process with same granularity and subfinctions as `preparePipeline`
@@ -2443,22 +2673,6 @@
2443
2673
  }
2444
2674
  }
2445
2675
 
2446
- /**
2447
- * This error type indicates that some limit was reached
2448
- *
2449
- * @public exported from `@promptbook/core`
2450
- */
2451
- var LimitReachedError = /** @class */ (function (_super) {
2452
- __extends(LimitReachedError, _super);
2453
- function LimitReachedError(message) {
2454
- var _this = _super.call(this, message) || this;
2455
- _this.name = 'LimitReachedError';
2456
- Object.setPrototypeOf(_this, LimitReachedError.prototype);
2457
- return _this;
2458
- }
2459
- return LimitReachedError;
2460
- }(Error));
2461
-
2462
2676
  /**
2463
2677
  * Replaces parameters in template with values from parameters object
2464
2678
  *
@@ -2844,7 +3058,6 @@
2844
3058
  return __awaiter(this, void 0, void 0, function () {
2845
3059
  var name, title, priority, usedParameterNames, dependentParameterNames, definedParameters, _a, _b, _c, definedParameterNames, parameters, _d, _e, parameterName, prompt, chatResult, completionResult, embeddingResult, result, resultString, expectError, scriptPipelineExecutionErrors, maxAttempts, jokerParameterNames, preparedContent, attempt, isJokerAttempt, jokerParameterName, _f, _g, _h, _j, scriptTools, error_2, e_4_1, _k, _l, functionName, postprocessingError, _m, _o, scriptTools, error_3, e_5_1, e_6_1, error_4;
2846
3060
  var e_7, _p, e_4, _q, e_6, _r, e_5, _s, _t;
2847
- var _this = this;
2848
3061
  return __generator(this, function (_u) {
2849
3062
  switch (_u.label) {
2850
3063
  case 0:
@@ -2965,71 +3178,8 @@
2965
3178
  return name === currentTemplate.personaName;
2966
3179
  }) || {})), currentTemplate.expectations),
2967
3180
  expectFormat: currentTemplate.expectFormat,
2968
- postprocessing: (currentTemplate.postprocessingFunctionNames || []).map(function (functionName) { return function (result) { return __awaiter(_this, void 0, void 0, function () {
2969
- var errors, _a, _b, scriptTools, error_5, e_8_1;
2970
- var e_8, _c;
2971
- return __generator(this, function (_d) {
2972
- switch (_d.label) {
2973
- case 0:
2974
- errors = [];
2975
- _d.label = 1;
2976
- case 1:
2977
- _d.trys.push([1, 8, 9, 10]);
2978
- _a = __values(arrayableToArray(tools.script)), _b = _a.next();
2979
- _d.label = 2;
2980
- case 2:
2981
- if (!!_b.done) return [3 /*break*/, 7];
2982
- scriptTools = _b.value;
2983
- _d.label = 3;
2984
- case 3:
2985
- _d.trys.push([3, 5, , 6]);
2986
- return [4 /*yield*/, scriptTools.execute({
2987
- scriptLanguage: "javascript" /* <- TODO: Try it in each languages; In future allow postprocessing with arbitrary combination of languages to combine */,
2988
- script: "".concat(functionName, "(result)"),
2989
- parameters: {
2990
- result: result || '',
2991
- // Note: No ...parametersForTemplate, because working with result only
2992
- },
2993
- })];
2994
- case 4: return [2 /*return*/, _d.sent()];
2995
- case 5:
2996
- error_5 = _d.sent();
2997
- if (!(error_5 instanceof Error)) {
2998
- throw error_5;
2999
- }
3000
- if (error_5 instanceof UnexpectedError) {
3001
- throw error_5;
3002
- }
3003
- errors.push(error_5);
3004
- return [3 /*break*/, 6];
3005
- case 6:
3006
- _b = _a.next();
3007
- return [3 /*break*/, 2];
3008
- case 7: return [3 /*break*/, 10];
3009
- case 8:
3010
- e_8_1 = _d.sent();
3011
- e_8 = { error: e_8_1 };
3012
- return [3 /*break*/, 10];
3013
- case 9:
3014
- try {
3015
- if (_b && !_b.done && (_c = _a.return)) _c.call(_a);
3016
- }
3017
- finally { if (e_8) throw e_8.error; }
3018
- return [7 /*endfinally*/];
3019
- case 10:
3020
- if (errors.length === 0) {
3021
- throw new PipelineExecutionError('Postprocessing in LlmExecutionTools failed because no ScriptExecutionTools were provided');
3022
- }
3023
- else if (errors.length === 1) {
3024
- throw errors[0];
3025
- }
3026
- else {
3027
- throw new PipelineExecutionError(spaceTrim.spaceTrim(function (block) { return "\n Postprocessing in LlmExecutionTools failed ".concat(errors.length, "x\n\n ").concat(block(errors.map(function (error) { return '- ' + error.message; }).join('\n\n')), "\n "); }));
3028
- }
3029
- }
3030
- });
3031
- }); }; }),
3032
- };
3181
+ postprocessingFunctionNames: currentTemplate.postprocessingFunctionNames,
3182
+ }; // <- TODO: Not very good type guard
3033
3183
  _g = currentTemplate.modelRequirements.modelVariant;
3034
3184
  switch (_g) {
3035
3185
  case 'CHAT': return [3 /*break*/, 8];
@@ -3254,7 +3404,7 @@
3254
3404
  executionReport.promptExecutions.push({
3255
3405
  prompt: __assign({}, prompt),
3256
3406
  result: result || undefined,
3257
- error: expectError || undefined,
3407
+ error: expectError === null ? undefined : serializeError(expectError),
3258
3408
  });
3259
3409
  }
3260
3410
  return [7 /*endfinally*/];
@@ -3289,7 +3439,7 @@
3289
3439
  });
3290
3440
  }
3291
3441
  function filterJustOutputParameters() {
3292
- var e_9, _a;
3442
+ var e_8, _a;
3293
3443
  var outputParameters = {};
3294
3444
  try {
3295
3445
  // Note: Filter ONLY output parameters
@@ -3306,12 +3456,12 @@
3306
3456
  outputParameters[parameter.name] = parametersToPass[parameter.name] || '';
3307
3457
  }
3308
3458
  }
3309
- catch (e_9_1) { e_9 = { error: e_9_1 }; }
3459
+ catch (e_8_1) { e_8 = { error: e_8_1 }; }
3310
3460
  finally {
3311
3461
  try {
3312
3462
  if (_c && !_c.done && (_a = _b.return)) _a.call(_b);
3313
3463
  }
3314
- finally { if (e_9) throw e_9.error; }
3464
+ finally { if (e_8) throw e_8.error; }
3315
3465
  }
3316
3466
  return outputParameters;
3317
3467
  }
@@ -3348,11 +3498,11 @@
3348
3498
  })), _b = _a.next(); !_b.done; _b = _a.next()) {
3349
3499
  parameter = _b.value;
3350
3500
  if (inputParameters[parameter.name] === undefined) {
3351
- return [2 /*return*/, deepFreezeWithSameType({
3501
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson("Unuccessful PipelineExecutorResult (with missing parameter {".concat(parameter.name, "}) PipelineExecutorResult"), {
3352
3502
  isSuccessful: false,
3353
3503
  errors: __spreadArray([
3354
3504
  new PipelineExecutionError("Parameter {".concat(parameter.name, "} is required as an input parameter"))
3355
- ], __read(errors), false),
3505
+ ], __read(errors), false).map(serializeError),
3356
3506
  warnings: [],
3357
3507
  executionReport: executionReport,
3358
3508
  outputParameters: {},
@@ -3378,12 +3528,12 @@
3378
3528
  warnings.push(new PipelineExecutionError("Extra parameter {".concat(parameterName, "} is being passed which is not part of the pipeline.")));
3379
3529
  }
3380
3530
  else if (parameter.isInput === false) {
3381
- return { value: deepFreezeWithSameType({
3531
+ return { value: $asDeeplyFrozenSerializableJson("Unuccessful PipelineExecutorResult (with extra parameter {".concat(parameter.name, "}) PipelineExecutorResult"), {
3382
3532
  isSuccessful: false,
3383
3533
  errors: __spreadArray([
3384
3534
  new PipelineExecutionError("Parameter {".concat(parameter.name, "} is passed as input parameter but it is not input"))
3385
- ], __read(errors), false),
3386
- warnings: warnings,
3535
+ ], __read(errors), false).map(serializeError),
3536
+ warnings: warnings.map(serializeError),
3387
3537
  executionReport: executionReport,
3388
3538
  outputParameters: {},
3389
3539
  usage: ZERO_USAGE,
@@ -3490,10 +3640,10 @@
3490
3640
  return (result === null || result === void 0 ? void 0 : result.usage) || ZERO_USAGE;
3491
3641
  })), false));
3492
3642
  outputParameters_1 = filterJustOutputParameters();
3493
- return [2 /*return*/, deepFreezeWithSameType({
3643
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson('Unuccessful PipelineExecutorResult (with misc errors) PipelineExecutorResult', {
3494
3644
  isSuccessful: false,
3495
- errors: __spreadArray([error_1], __read(errors), false),
3496
- warnings: warnings,
3645
+ errors: __spreadArray([error_1], __read(errors), false).map(serializeError),
3646
+ warnings: warnings.map(serializeError),
3497
3647
  usage: usage_1,
3498
3648
  executionReport: executionReport,
3499
3649
  outputParameters: outputParameters_1,
@@ -3505,10 +3655,10 @@
3505
3655
  return (result === null || result === void 0 ? void 0 : result.usage) || ZERO_USAGE;
3506
3656
  })), false));
3507
3657
  outputParameters = filterJustOutputParameters();
3508
- return [2 /*return*/, deepFreezeWithSameType({
3658
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson('Successful PipelineExecutorResult', {
3509
3659
  isSuccessful: true,
3510
- errors: errors,
3511
- warnings: warnings,
3660
+ errors: errors.map(serializeError),
3661
+ warnings: warnings.map(serializeError),
3512
3662
  usage: usage,
3513
3663
  executionReport: executionReport,
3514
3664
  outputParameters: outputParameters,
@@ -3878,6 +4028,7 @@
3878
4028
  });
3879
4029
  }
3880
4030
  /**
4031
+ * TODO: [🔃] !!!!! If the persona was prepared with different version or different set of models, prepare it once again
3881
4032
  * TODO: [🏢] !! Check validity of `modelName` in pipeline
3882
4033
  * TODO: [🏢] !! Check validity of `systemMessage` in pipeline
3883
4034
  * TODO: [🏢] !! Check validity of `temperature` in pipeline
@@ -3978,7 +4129,7 @@
3978
4129
  return __awaiter(this, void 0, void 0, function () {
3979
4130
  var llmTools, _a, maxParallelCount, _b, isVerbose, parameters, promptTemplates,
3980
4131
  /*
3981
- <- TODO: [🧠][0] `promptbookVersion` */
4132
+ <- TODO: [🧠][🪑] `promptbookVersion` */
3982
4133
  knowledgeSources /*
3983
4134
  <- TODO: [🧊] `knowledgePieces` */, personas /*
3984
4135
  <- TODO: [🧊] `preparations` */, llmToolsWithUsage, currentPreparation, preparations, preparedPersonas, knowledgeSourcesPrepared, partialknowledgePiecesPrepared, knowledgePiecesPrepared, promptTemplatesPrepared /* TODO: parameters: parametersPrepared*/;
@@ -4048,7 +4199,7 @@
4048
4199
  // ----- /Templates preparation -----
4049
4200
  // Note: Count total usage
4050
4201
  currentPreparation.usage = llmToolsWithUsage.getTotalUsage();
4051
- return [2 /*return*/, __assign(__assign({}, clonePipeline(pipeline)), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations })];
4202
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson('Prepared PipelineJson', __assign(__assign({}, clonePipeline(pipeline)), { promptTemplates: promptTemplatesPrepared, knowledgeSources: knowledgeSourcesPrepared, knowledgePieces: knowledgePiecesPrepared, personas: preparedPersonas, preparations: preparations }))];
4052
4203
  }
4053
4204
  });
4054
4205
  });
@@ -5304,22 +5455,6 @@
5304
5455
  return null;
5305
5456
  }
5306
5457
 
5307
- /**
5308
- * This error type indicates that some part of the code is not implemented yet
5309
- *
5310
- * @public exported from `@promptbook/core`
5311
- */
5312
- var NotYetImplementedError = /** @class */ (function (_super) {
5313
- __extends(NotYetImplementedError, _super);
5314
- function NotYetImplementedError(message) {
5315
- var _this = _super.call(this, spaceTrim.spaceTrim(function (block) { return "\n ".concat(block(message), "\n\n Note: This feature is not implemented yet but it will be soon.\n\n If you want speed up the implementation or just read more, look here:\n https://github.com/webgptorg/promptbook\n\n Or contact us on me@pavolhejny.com\n\n "); })) || this;
5316
- _this.name = 'NotYetImplementedError';
5317
- Object.setPrototypeOf(_this, NotYetImplementedError.prototype);
5318
- return _this;
5319
- }
5320
- return NotYetImplementedError;
5321
- }(Error));
5322
-
5323
5458
  /**
5324
5459
  * Supported script languages
5325
5460
  *
@@ -6011,7 +6146,7 @@
6011
6146
  }
6012
6147
  });
6013
6148
  // =============================================================
6014
- return pipelineJson;
6149
+ return $asDeeplyFrozenSerializableJson('pipelineJson', pipelineJson);
6015
6150
  }
6016
6151
  /**
6017
6152
  * TODO: !!!! Warn if used only sync version
@@ -6055,7 +6190,9 @@
6055
6190
  case 1:
6056
6191
  pipelineJson = _a.sent();
6057
6192
  _a.label = 2;
6058
- case 2: return [2 /*return*/, pipelineJson];
6193
+ case 2:
6194
+ // Note: No need to use `$asDeeplyFrozenSerializableJson` because `pipelineStringToJsonSync` and `preparePipeline` already do that
6195
+ return [2 /*return*/, pipelineJson];
6059
6196
  }
6060
6197
  });
6061
6198
  });
@@ -6066,22 +6203,6 @@
6066
6203
  * TODO: [🧠] Should be in generated JSON file GENERATOR_WARNING
6067
6204
  */
6068
6205
 
6069
- /**
6070
- * This error indicates that the pipeline collection cannot be propperly loaded
6071
- *
6072
- * @public exported from `@promptbook/core`
6073
- */
6074
- var CollectionError = /** @class */ (function (_super) {
6075
- __extends(CollectionError, _super);
6076
- function CollectionError(message) {
6077
- var _this = _super.call(this, message) || this;
6078
- _this.name = 'CollectionError';
6079
- Object.setPrototypeOf(_this, CollectionError.prototype);
6080
- return _this;
6081
- }
6082
- return CollectionError;
6083
- }(Error));
6084
-
6085
6206
  /**
6086
6207
  * Checks if the file exists
6087
6208
  *
@@ -6438,7 +6559,7 @@
6438
6559
  }
6439
6560
  else {
6440
6561
  existing = collection.get(pipeline.pipelineUrl);
6441
- throw new ReferenceError(spaceTrim__default["default"]("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4F\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
6562
+ throw new PipelineUrlError(spaceTrim__default["default"]("\n Pipeline with URL \"".concat(pipeline.pipelineUrl, "\" is already in the collection \uD83C\uDF4F\n\n Conflicting files:\n ").concat(existing.sourceFile || 'Unknown', "\n ").concat(pipeline.sourceFile || 'Unknown', "\n\n Note: You have probably forgotten to run \"ptbk make\" to update the collection\n Note: Pipelines with the same URL are not allowed\n Only exepction is when the pipelines are identical\n\n ")));
6442
6563
  }
6443
6564
  }
6444
6565
  }
@@ -6504,6 +6625,39 @@
6504
6625
  * TODO: [🖇] What about symlinks? Maybe option isSymlinksFollowed
6505
6626
  */
6506
6627
 
6628
+ /**
6629
+ * Tests if the value is [🚉] serializable as JSON
6630
+ *
6631
+ * - Almost all primitives are serializable BUT:
6632
+ * - `undefined` is not serializable
6633
+ * - `NaN` is not serializable
6634
+ * - Objects and arrays are serializable if all their properties are serializable
6635
+ * - Functions are not serializable
6636
+ * - Circular references are not serializable
6637
+ * - `Date` objects are not serializable
6638
+ * - `Map` and `Set` objects are not serializable
6639
+ * - `RegExp` objects are not serializable
6640
+ * - `Error` objects are not serializable
6641
+ * - `Symbol` objects are not serializable
6642
+ * - And much more...
6643
+ *
6644
+ *
6645
+ * @public exported from `@promptbook/utils`
6646
+ */
6647
+ function isSerializableAsJson(value) {
6648
+ try {
6649
+ checkSerializableAsJson('', value);
6650
+ return true;
6651
+ }
6652
+ catch (error) {
6653
+ return false;
6654
+ }
6655
+ }
6656
+ /**
6657
+ * TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
6658
+ * TODO: [🧠][💺] Can be done this on type-level?
6659
+ */
6660
+
6507
6661
  /**
6508
6662
  * Stringify the PipelineJson with proper formatting
6509
6663
  *
@@ -6513,6 +6667,9 @@
6513
6667
  * @public exported from `@promptbook/core`
6514
6668
  */
6515
6669
  function stringifyPipelineJson(pipeline) {
6670
+ if (!isSerializableAsJson(pipeline)) {
6671
+ throw new UnexpectedError(spaceTrim__default["default"]("\n Cannot stringify the pipeline, because it is not serializable as JSON\n\n There can be multiple reasons:\n 1) The pipeline contains circular references\n 2) It is not a valid PipelineJson\n "));
6672
+ }
6516
6673
  var pipelineJsonStringified = JSON.stringify(pipeline, null, 4);
6517
6674
  for (var i = 0; i < LOOP_LIMIT; i++) {
6518
6675
  pipelineJsonStringified = pipelineJsonStringified.replace(/(-?0\.\d+),[\n\s]+(-?0\.\d+)/gms, "$1".concat(REPLACING_NONCE, "$2"));
@@ -6638,6 +6795,9 @@
6638
6795
  switch (_a.label) {
6639
6796
  case 0:
6640
6797
  filename = this.getFilenameForKey(key);
6798
+ if (!isSerializableAsJson(value)) {
6799
+ throw new UnexpectedError("The \"".concat(key, "\" you want to store in JSON file is not serializable as JSON"));
6800
+ }
6641
6801
  fileContent = stringifyPipelineJson(value);
6642
6802
  return [4 /*yield*/, promises.mkdir(path.dirname(filename), { recursive: true })];
6643
6803
  case 1:
@@ -6682,14 +6842,11 @@
6682
6842
  *
6683
6843
  * Note: `$` is used to indicate that this function is not a pure function - it access global scope
6684
6844
  *
6685
- * @public exported from `@promptbook/utils`
6845
+ * @private internal function of `$Register`
6686
6846
  */
6687
6847
  function $getGlobalScope() {
6688
6848
  return Function('return this')();
6689
6849
  }
6690
- /***
6691
- * TODO: !!!!! Make private and promptbook registry from this
6692
- */
6693
6850
 
6694
6851
  /**
6695
6852
  * Register is @@@
@@ -7770,13 +7927,13 @@
7770
7927
  socket.emit('listModels-request', {
7771
7928
  isAnonymous: true,
7772
7929
  llmToolsConfiguration: this.options.llmToolsConfiguration,
7773
- });
7930
+ } /* <- TODO: [🤛] */);
7774
7931
  }
7775
7932
  else {
7776
7933
  socket.emit('listModels-request', {
7777
7934
  isAnonymous: false,
7778
7935
  clientId: this.options.clientId,
7779
- });
7936
+ } /* <- TODO: [🤛] */);
7780
7937
  }
7781
7938
  return [4 /*yield*/, new Promise(function (resolve, reject) {
7782
7939
  socket.on('listModels-response', function (response) {
@@ -7784,7 +7941,7 @@
7784
7941
  socket.disconnect();
7785
7942
  });
7786
7943
  socket.on('error', function (error) {
7787
- reject(new Error(error.errorMessage));
7944
+ reject(deserializeError(error));
7788
7945
  socket.disconnect();
7789
7946
  });
7790
7947
  })];
@@ -7863,16 +8020,14 @@
7863
8020
  isAnonymous: true,
7864
8021
  llmToolsConfiguration: this.options.llmToolsConfiguration,
7865
8022
  prompt: prompt,
7866
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
7867
- });
8023
+ } /* <- TODO: [🤛] */);
7868
8024
  }
7869
8025
  else {
7870
8026
  socket.emit('prompt-request', {
7871
8027
  isAnonymous: false,
7872
8028
  clientId: this.options.clientId,
7873
8029
  prompt: prompt,
7874
- // <- TODO: [🛫] `prompt` is NOT fully serializable as JSON, it contains functions which are not serializable
7875
- });
8030
+ } /* <- TODO: [🤛] */);
7876
8031
  }
7877
8032
  return [4 /*yield*/, new Promise(function (resolve, reject) {
7878
8033
  socket.on('prompt-response', function (response) {
@@ -7880,7 +8035,7 @@
7880
8035
  socket.disconnect();
7881
8036
  });
7882
8037
  socket.on('error', function (error) {
7883
- reject(new PipelineExecutionError(error.errorMessage));
8038
+ reject(deserializeError(error));
7884
8039
  socket.disconnect();
7885
8040
  });
7886
8041
  })];
@@ -7895,6 +8050,7 @@
7895
8050
  return RemoteLlmExecutionTools;
7896
8051
  }());
7897
8052
  /**
8053
+ * TODO: Maybe use `$asDeeplyFrozenSerializableJson`
7898
8054
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
7899
8055
  * TODO: [🍓] Allow to list compatible models with each variant
7900
8056
  * TODO: [🗯] RemoteLlmExecutionTools should extend Destroyable and implement IDestroyable
@@ -7929,7 +8085,7 @@
7929
8085
  * @see https://docs.anthropic.com/en/docs/models-overview
7930
8086
  * @public exported from `@promptbook/anthropic-claude`
7931
8087
  */
7932
- var ANTHROPIC_CLAUDE_MODELS = [
8088
+ var ANTHROPIC_CLAUDE_MODELS = $asDeeplyFrozenSerializableJson('ANTHROPIC_CLAUDE_MODELS', [
7933
8089
  {
7934
8090
  modelVariant: 'CHAT',
7935
8091
  modelTitle: 'Claude 3.5 Sonnet',
@@ -7994,7 +8150,7 @@
7994
8150
  },
7995
8151
  },
7996
8152
  // TODO: !!! Claude 1 and 2 has also completion versions - ask Hoagy
7997
- ];
8153
+ ]);
7998
8154
  /**
7999
8155
  * Note: [🤖] Add models of new variant
8000
8156
  * TODO: [🧠] !!! Add embedding models OR Anthropic has only chat+completion models?
@@ -8205,7 +8361,7 @@
8205
8361
  // eslint-disable-next-line prefer-const
8206
8362
  complete = getCurrentIsoDate();
8207
8363
  usage = computeAnthropicClaudeUsage(content, '', rawResponse);
8208
- return [2 /*return*/, {
8364
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson('AnthropicClaudeExecutionTools ChatPromptResult', {
8209
8365
  content: resultContent,
8210
8366
  modelName: rawResponse.model,
8211
8367
  timing: {
@@ -8217,7 +8373,7 @@
8217
8373
  rawRequest: rawRequest,
8218
8374
  rawResponse: rawResponse,
8219
8375
  // <- [🗯]
8220
- }];
8376
+ })];
8221
8377
  }
8222
8378
  });
8223
8379
  });
@@ -8226,7 +8382,7 @@
8226
8382
  TODO: [👏]
8227
8383
  public async callCompletionModel(
8228
8384
  prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>,
8229
- ): Promise<PromptCompletionResult> {
8385
+ ): Promise<CompletionPromptResult> {
8230
8386
 
8231
8387
  if (this.options.isVerbose) {
8232
8388
  console.info('🖋 Anthropic Claude callCompletionModel call');
@@ -8279,7 +8435,7 @@
8279
8435
 
8280
8436
 
8281
8437
 
8282
- return {
8438
+ return $asDeeplyFrozenSerializableJson('AnthropicClaudeExecutionTools CompletionPromptResult',{
8283
8439
  content: resultContent,
8284
8440
  modelName: rawResponse.model || model,
8285
8441
  timing: {
@@ -8289,7 +8445,7 @@
8289
8445
  usage,
8290
8446
  rawResponse,
8291
8447
  // <- [🗯]
8292
- };
8448
+ });
8293
8449
  }
8294
8450
  */
8295
8451
  // <- Note: [🤖] callXxxModel
@@ -8426,7 +8582,7 @@
8426
8582
  * @see https://openai.com/api/pricing/
8427
8583
  * @public exported from `@promptbook/openai`
8428
8584
  */
8429
- var OPENAI_MODELS = [
8585
+ var OPENAI_MODELS = $asDeeplyFrozenSerializableJson('OPENAI_MODELS', [
8430
8586
  /*/
8431
8587
  {
8432
8588
  modelTitle: 'dall-e-3',
@@ -8754,7 +8910,7 @@
8754
8910
  },
8755
8911
  },
8756
8912
  /**/
8757
- ];
8913
+ ]);
8758
8914
  /**
8759
8915
  * Note: [🤖] Add models of new variant
8760
8916
  * TODO: [🧠] Some mechanism to propagate unsureness
@@ -8922,7 +9078,7 @@
8922
9078
  input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
8923
9079
  output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
8924
9080
  };
8925
- return [2 /*return*/, {
9081
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson('AzureOpenAiExecutionTools ChatPromptResult', {
8926
9082
  content: resultContent,
8927
9083
  modelName: modelName,
8928
9084
  timing: {
@@ -8932,9 +9088,9 @@
8932
9088
  usage: usage,
8933
9089
  rawPromptContent: rawPromptContent,
8934
9090
  rawRequest: rawRequest,
8935
- rawResponse: rawResponse,
9091
+ rawResponse: __assign(__assign({}, rawResponse), { created: rawResponse.created.toISOString() }),
8936
9092
  // <- [🗯]
8937
- }];
9093
+ })];
8938
9094
  case 4:
8939
9095
  error_1 = _c.sent();
8940
9096
  throw this.transformAzureError(error_1);
@@ -9009,7 +9165,7 @@
9009
9165
  input: __assign({ tokensCount: uncertainNumber((_a = rawResponse.usage) === null || _a === void 0 ? void 0 : _a.promptTokens) }, computeUsageCounts(prompt.content)),
9010
9166
  output: __assign({ tokensCount: uncertainNumber((_b = rawResponse.usage) === null || _b === void 0 ? void 0 : _b.completionTokens) }, computeUsageCounts(prompt.content)),
9011
9167
  };
9012
- return [2 /*return*/, {
9168
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson('AzureOpenAiExecutionTools CompletionPromptResult', {
9013
9169
  content: resultContent,
9014
9170
  modelName: modelName,
9015
9171
  timing: {
@@ -9019,9 +9175,9 @@
9019
9175
  usage: usage,
9020
9176
  rawPromptContent: rawPromptContent,
9021
9177
  rawRequest: rawRequest,
9022
- rawResponse: rawResponse,
9178
+ rawResponse: __assign(__assign({}, rawResponse), { created: rawResponse.created.toISOString() }),
9023
9179
  // <- [🗯]
9024
- }];
9180
+ })];
9025
9181
  case 4:
9026
9182
  error_2 = _c.sent();
9027
9183
  throw this.transformAzureError(error_2);
@@ -9035,11 +9191,14 @@
9035
9191
  * Changes Azure error (which is not propper Error but object) to propper Error
9036
9192
  */
9037
9193
  AzureOpenAiExecutionTools.prototype.transformAzureError = function (azureError) {
9194
+ if (azureError instanceof UnexpectedError) {
9195
+ return azureError;
9196
+ }
9038
9197
  if (typeof azureError !== 'object' || azureError === null) {
9039
9198
  return new PipelineExecutionError("Unknown Azure OpenAI error");
9040
9199
  }
9041
9200
  var code = azureError.code, message = azureError.message;
9042
- return new PipelineExecutionError("".concat(code, ": ").concat(message));
9201
+ return new PipelineExecutionError("".concat(code || '(No Azure error code)', ": ").concat(message));
9043
9202
  };
9044
9203
  return AzureOpenAiExecutionTools;
9045
9204
  }());
@@ -9116,6 +9275,24 @@
9116
9275
  },
9117
9276
  });
9118
9277
 
9278
+ /**
9279
+ * Detects if the code is running in a browser environment in main thread (Not in a web worker)
9280
+ *
9281
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9282
+ *
9283
+ * @public exported from `@promptbook/utils`
9284
+ */
9285
+ var $isRunningInBrowser = new Function("\n try {\n return this === window;\n } catch (e) {\n return false;\n }\n");
9286
+
9287
+ /**
9288
+ * Detects if the code is running in a web worker
9289
+ *
9290
+ * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
9291
+ *
9292
+ * @public exported from `@promptbook/utils`
9293
+ */
9294
+ var $isRunningInWebWorker = new Function("\n try {\n if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {\n return true;\n } else {\n return false;\n }\n } catch (e) {\n return false;\n }\n");
9295
+
9119
9296
  /**
9120
9297
  * Computes the usage of the OpenAI API based on the response from OpenAI
9121
9298
  *
@@ -9301,7 +9478,7 @@
9301
9478
  if (resultContent === null) {
9302
9479
  throw new PipelineExecutionError('No response message from OpenAI');
9303
9480
  }
9304
- return [2 /*return*/, {
9481
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools ChatPromptResult', {
9305
9482
  content: resultContent,
9306
9483
  modelName: rawResponse.model || modelName,
9307
9484
  timing: {
@@ -9313,7 +9490,7 @@
9313
9490
  rawRequest: rawRequest,
9314
9491
  rawResponse: rawResponse,
9315
9492
  // <- [🗯]
9316
- }];
9493
+ })];
9317
9494
  }
9318
9495
  });
9319
9496
  });
@@ -9370,7 +9547,7 @@
9370
9547
  // eslint-disable-next-line prefer-const
9371
9548
  complete = getCurrentIsoDate();
9372
9549
  usage = computeOpenAiUsage(content, resultContent || '', rawResponse);
9373
- return [2 /*return*/, {
9550
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools CompletionPromptResult', {
9374
9551
  content: resultContent,
9375
9552
  modelName: rawResponse.model || modelName,
9376
9553
  timing: {
@@ -9382,7 +9559,7 @@
9382
9559
  rawRequest: rawRequest,
9383
9560
  rawResponse: rawResponse,
9384
9561
  // <- [🗯]
9385
- }];
9562
+ })];
9386
9563
  }
9387
9564
  });
9388
9565
  });
@@ -9430,7 +9607,7 @@
9430
9607
  // eslint-disable-next-line prefer-const
9431
9608
  complete = getCurrentIsoDate();
9432
9609
  usage = computeOpenAiUsage(content, '', rawResponse);
9433
- return [2 /*return*/, {
9610
+ return [2 /*return*/, $asDeeplyFrozenSerializableJson('OpenAiExecutionTools EmbeddingPromptResult', {
9434
9611
  content: resultContent,
9435
9612
  modelName: rawResponse.model || modelName,
9436
9613
  timing: {
@@ -9442,7 +9619,7 @@
9442
9619
  rawRequest: rawRequest,
9443
9620
  rawResponse: rawResponse,
9444
9621
  // <- [🗯]
9445
- }];
9622
+ })];
9446
9623
  }
9447
9624
  });
9448
9625
  });
@@ -9501,6 +9678,9 @@
9501
9678
  */
9502
9679
  var createOpenAiExecutionTools = Object.assign(function (options) {
9503
9680
  // TODO: [🧠] !!!! If browser, auto add `dangerouslyAllowBrowser`
9681
+ if (($isRunningInBrowser() || $isRunningInWebWorker()) && !options.dangerouslyAllowBrowser) {
9682
+ options = __assign(__assign({}, options), { dangerouslyAllowBrowser: true });
9683
+ }
9504
9684
  return new OpenAiExecutionTools(options);
9505
9685
  }, {
9506
9686
  packageName: '@promptbook/openai',