@promptbook/node 0.104.0-1 → 0.104.0-10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/esm/index.es.js +43 -42
  2. package/esm/index.es.js.map +1 -1
  3. package/esm/typings/servers.d.ts +8 -0
  4. package/esm/typings/src/_packages/core.index.d.ts +2 -0
  5. package/esm/typings/src/_packages/types.index.d.ts +10 -2
  6. package/esm/typings/src/book-2.0/agent-source/AgentBasicInformation.d.ts +6 -1
  7. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +6 -6
  8. package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirementsWithCommitments.closed.test.d.ts +1 -0
  9. package/esm/typings/src/book-2.0/utils/generatePlaceholderAgentProfileImageUrl.d.ts +3 -3
  10. package/esm/typings/src/book-components/Chat/Chat/ChatMessageItem.d.ts +5 -1
  11. package/esm/typings/src/book-components/Chat/Chat/ChatProps.d.ts +5 -0
  12. package/esm/typings/src/book-components/Chat/CodeBlock/CodeBlock.d.ts +13 -0
  13. package/esm/typings/src/book-components/Chat/MarkdownContent/MarkdownContent.d.ts +1 -0
  14. package/esm/typings/src/book-components/Chat/types/ChatMessage.d.ts +7 -11
  15. package/esm/typings/src/book-components/_common/Dropdown/Dropdown.d.ts +2 -2
  16. package/esm/typings/src/book-components/_common/MenuHoisting/MenuHoistingContext.d.ts +56 -0
  17. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentCollectionInSupabase.d.ts +21 -11
  18. package/esm/typings/src/collection/agent-collection/constructors/agent-collection-in-supabase/AgentsDatabaseSchema.d.ts +80 -14
  19. package/esm/typings/src/commitments/DICTIONARY/DICTIONARY.d.ts +46 -0
  20. package/esm/typings/src/commitments/index.d.ts +2 -1
  21. package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts +6 -2
  22. package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts +1 -1
  23. package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts +1 -1
  24. package/esm/typings/src/llm-providers/openai/createOpenAiCompatibleExecutionTools.d.ts +1 -1
  25. package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts +1 -0
  26. package/esm/typings/src/types/Message.d.ts +49 -0
  27. package/esm/typings/src/types/ModelRequirements.d.ts +38 -14
  28. package/esm/typings/src/types/typeAliases.d.ts +23 -1
  29. package/esm/typings/src/utils/color/utils/colorToDataUrl.d.ts +2 -1
  30. package/esm/typings/src/utils/environment/$detectRuntimeEnvironment.d.ts +4 -4
  31. package/esm/typings/src/utils/environment/$isRunningInBrowser.d.ts +1 -1
  32. package/esm/typings/src/utils/environment/$isRunningInJest.d.ts +1 -1
  33. package/esm/typings/src/utils/environment/$isRunningInNode.d.ts +1 -1
  34. package/esm/typings/src/utils/environment/$isRunningInWebWorker.d.ts +1 -1
  35. package/esm/typings/src/utils/markdown/extractAllBlocksFromMarkdown.d.ts +2 -2
  36. package/esm/typings/src/utils/markdown/extractOneBlockFromMarkdown.d.ts +2 -2
  37. package/esm/typings/src/utils/random/$randomBase58.d.ts +12 -0
  38. package/esm/typings/src/version.d.ts +1 -1
  39. package/package.json +2 -2
  40. package/umd/index.umd.js +47 -46
  41. package/umd/index.umd.js.map +1 -1
  42. package/esm/typings/src/book-2.0/utils/generateGravatarUrl.d.ts +0 -10
package/esm/typings/src/llm-providers/_multiple/MultipleLlmExecutionTools.d.ts CHANGED
@@ -1,8 +1,8 @@
 import type { ChatParticipant } from '../../book-components/Chat/types/ChatParticipant';
 import type { AvailableModel } from '../../execution/AvailableModel';
 import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
-import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, PromptResult } from '../../execution/PromptResult';
-import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, Prompt } from '../../types/Prompt';
+import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult, PromptResult } from '../../execution/PromptResult';
+import type { ChatPrompt, CompletionPrompt, EmbeddingPrompt, ImagePrompt, Prompt } from '../../types/Prompt';
 import type { string_markdown, string_markdown_text, string_title } from '../../types/typeAliases';
 /**
  * Multiple LLM Execution Tools is a proxy server that uses multiple execution tools internally and exposes the executor interface externally.
@@ -43,6 +43,10 @@ export declare class MultipleLlmExecutionTools implements LlmExecutionTools {
      * Calls the best available embedding model
      */
    callEmbeddingModel(prompt: EmbeddingPrompt): Promise<EmbeddingPromptResult>;
+    /**
+     * Calls the best available embedding model
+     */
+    callImageGenerationModel(prompt: ImagePrompt): Promise<ImagePromptResult>;
    /**
     * Calls the best available model
     *
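The new optional `callImageGenerationModel` mirrors the existing chat, completion and embedding methods. A minimal standalone sketch (the types below are stand-ins, not the package's exact `ImagePrompt`/`ImagePromptResult` types) of the dispatch pattern this enables, skipping providers that do not implement the optional method, as `MultipleLlmExecutionTools` does:

    // Standalone sketch; real code would use the package's LlmExecutionTools types.
    type ImageCapableTools = {
        title: string;
        callImageGenerationModel?(prompt: unknown): Promise<unknown>;
    };

    async function callFirstImageCapable(tools: ReadonlyArray<ImageCapableTools>, prompt: unknown) {
        for (const llmTools of tools) {
            // Providers without image support are skipped, as in MultipleLlmExecutionTools
            if (llmTools.callImageGenerationModel === undefined) {
                continue;
            }
            return await llmTools.callImageGenerationModel(prompt);
        }
        throw new Error('No provider supports IMAGE_GENERATION prompts');
    }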
package/esm/typings/src/llm-providers/agent/AgentLlmExecutionTools.d.ts CHANGED
@@ -22,7 +22,7 @@ import type { CreateAgentLlmExecutionToolsOptions } from './CreateAgentLlmExecut
 * @public exported from `@promptbook/core`
 */
 export declare class AgentLlmExecutionTools implements LlmExecutionTools {
-    private readonly options;
+    protected readonly options: CreateAgentLlmExecutionToolsOptions;
    /**
     * Cache of OpenAI assistants to avoid creating duplicates
     */
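Widening `options` from `private` to `protected` lets subclasses read the construction options. A hypothetical sketch (the subclass and its method are illustrative only, and it assumes `AgentLlmExecutionTools`, exported from `@promptbook/core`, can be extended):

    import { AgentLlmExecutionTools } from '@promptbook/core';

    class InspectableAgentTools extends AgentLlmExecutionTools {
        public listOptionKeys(): ReadonlyArray<string> {
            // Compiles now that `options` is `protected` (it was `private` before)
            return Object.keys(this.options);
        }
    }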
package/esm/typings/src/llm-providers/ollama/OllamaExecutionTools.d.ts CHANGED
@@ -39,7 +39,7 @@ export declare class OllamaExecutionTools extends OpenAiCompatibleExecutionTools
     */
    protected getDefaultEmbeddingModel(): AvailableModel;
    /**
-     * Default model for image generation variant.
+     * Default model for completion variant.
     */
    protected getDefaultImageGenerationModel(): AvailableModel;
 }
package/esm/typings/src/llm-providers/openai/createOpenAiCompatibleExecutionTools.d.ts CHANGED
@@ -64,7 +64,7 @@ export declare class HardcodedOpenAiCompatibleExecutionTools extends OpenAiCompa
     */
    protected getDefaultEmbeddingModel(): AvailableModel;
    /**
-     * Default model for image generation variant.
+     * Default model for completion variant.
     */
    protected getDefaultImageGenerationModel(): AvailableModel;
 }
package/esm/typings/src/llm-providers/remote/RemoteLlmExecutionTools.d.ts CHANGED
@@ -46,6 +46,7 @@ export declare class RemoteLlmExecutionTools<TCustomOptions = undefined> impleme
    private callCommonModel;
 }
 /**
+ * TODO: !!!! Deprecate pipeline server and all of its components
  * TODO: Maybe use `$exportJson`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [🍓] Allow to list compatible models with each variant
package/esm/typings/src/types/Message.d.ts CHANGED
@@ -0,0 +1,49 @@
+import { Arrayable } from 'type-fest';
+import { really_any } from '../_packages/types.index';
+import { id, string_date_iso8601, string_markdown } from './typeAliases';
+/**
+ * A generic message structure for various communication channels
+ */
+export type Message<TParticipant> = {
+    /**
+     * Unique identifier of the message
+     */
+    readonly id?: id;
+    /**
+     * Date when the message was created
+     */
+    readonly createdAt?: Date | string_date_iso8601;
+    /**
+     * The communication channel of the message
+     */
+    readonly channel?: 'PROMPTBOOK_CHAT' | 'EMAIL' | 'SMS' | 'WHATSAPP' | 'TELEGRAM' | 'SIGNAL' | string | 'UNKNOWN';
+    /**
+     * Is the message send from the Promptbook or to the Promptbook
+     */
+    readonly direction?: 'INBOUND' | 'OUTBOUND' | 'INTERNAL' | 'INITIAL';
+    /**
+     * Who sent the message
+     */
+    readonly sender: TParticipant;
+    /**
+     * Who are the recipients of the message
+     */
+    readonly recipients?: Readonly<Arrayable<TParticipant>>;
+    /**
+     * The content of the message as markdown
+     *
+     * Note: We are converting all message content to markdown for consistency
+     */
+    readonly content: string_markdown;
+    /**
+     * The thread identifier the message belongs to
+     *
+     * - `null` means the message is not part of any thread
+     * - `undefined` means that we don't know if the message is part of a thread or not
+     */
+    readonly threadId?: id | null;
+    /**
+     * Arbitrary metadata associated with the message
+     */
+    readonly metadata?: Readonly<Record<string, really_any>>;
+};
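A minimal sketch of constructing the new `Message` type; the import path and the participant shape are assumptions for illustration only:

    import type { Message } from '@promptbook/types'; // assumed export location

    type Participant = { name: string }; // stand-in participant type

    const message: Message<Participant> = {
        channel: 'PROMPTBOOK_CHAT',
        direction: 'OUTBOUND',
        sender: { name: 'assistant' },
        recipients: [{ name: 'user' }], // Arrayable: a single participant would also be accepted
        content: 'Hello, **world**!', // content is markdown by convention
        threadId: null, // explicitly not part of any thread
    };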
package/esm/typings/src/types/ModelRequirements.d.ts CHANGED
@@ -17,7 +17,17 @@ export type CompletionModelRequirements = CommonModelRequirements & {
    /**
     * Completion model variant
     */
-    modelVariant: 'COMPLETION';
+    readonly modelVariant: 'COMPLETION';
+    /**
+     * The temperature of the model
+     *
+     * Note: [💱] Promptbook is using just `temperature` (not `top_k` and `top_p`)
+     */
+    readonly temperature?: number_model_temperature;
+    /**
+     * Maximum number of tokens that can be generated by the model
+     */
+    readonly maxTokens?: number;
 };
 /**
  * Model requirements for the chat variant
@@ -28,11 +38,21 @@ export type ChatModelRequirements = CommonModelRequirements & {
    /**
     * Chat model variant
     */
-    modelVariant: 'CHAT';
+    readonly modelVariant: 'CHAT';
    /**
     * System message to be used in the model
     */
    readonly systemMessage?: string_system_message;
+    /**
+     * The temperature of the model
+     *
+     * Note: [💱] Promptbook is using just `temperature` (not `top_k` and `top_p`)
+     */
+    readonly temperature?: number_model_temperature;
+    /**
+     * Maximum number of tokens that can be generated by the model
+     */
+    readonly maxTokens?: number;
 };
 /**
  * Model requirements for the image generation variant
@@ -43,7 +63,21 @@ export type ImageGenerationModelRequirements = CommonModelRequirements & {
    /**
     * Image generation model variant
     */
-    modelVariant: 'IMAGE_GENERATION';
+    readonly modelVariant: 'IMAGE_GENERATION';
+    /**
+     * Size of the generated image
+     *
+     * e.g. '1536x1536'
+     */
+    readonly size?: '1024x1024' | '1792x1024' | '1024x1792' | `${number}x${number}`;
+    /**
+     * Quality of the generated image
+     */
+    readonly quality?: 'standard' | 'hd';
+    /**
+     * Style of the generated image
+     */
+    readonly style?: 'vivid' | 'natural';
 };
 /**
  * Model requirements for the embedding variant
@@ -54,7 +88,7 @@ export type EmbeddingModelRequirements = CommonModelRequirements & {
    /**
     * Embedding model variant
     */
-    modelVariant: 'EMBEDDING';
+    readonly modelVariant: 'EMBEDDING';
 };
 /**
  * Common properties for all model requirements variants
@@ -84,20 +118,10 @@ export type CommonModelRequirements = {
     * @example 'gpt-4', 'gpt-4-32k-0314', 'gpt-3.5-turbo-instruct',...
     */
    readonly modelName?: string_model_name;
-    /**
-     * The temperature of the model
-     *
-     * Note: [💱] Promptbook is using just `temperature` (not `top_k` and `top_p`)
-     */
-    readonly temperature?: number_model_temperature;
    /**
     * Seed for the model
     */
    readonly seed?: number_seed;
-    /**
-     * Maximum number of tokens that can be generated by the model
-     */
-    readonly maxTokens?: number;
 };
 /**
  * TODO: [🧠][🈁] `seed` should maybe be somewhere else (not in `ModelRequirements`) (similar that `user` identification is not here)
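Since `temperature` and `maxTokens` moved from `CommonModelRequirements` onto the chat and completion variants, and the image variant gained `size`, `quality` and `style`, requirement objects now look roughly like this (import path assumed for illustration):

    import type { ChatModelRequirements, ImageGenerationModelRequirements } from '@promptbook/types';

    const chatRequirements: ChatModelRequirements = {
        modelVariant: 'CHAT',
        systemMessage: 'You are a helpful assistant.',
        temperature: 0.2, // moved here from CommonModelRequirements
        maxTokens: 1024, // moved here from CommonModelRequirements
    };

    const imageRequirements: ImageGenerationModelRequirements = {
        modelVariant: 'IMAGE_GENERATION',
        size: '1024x1024',
        quality: 'hd',
        style: 'natural',
    };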
package/esm/typings/src/types/typeAliases.d.ts CHANGED
@@ -14,9 +14,15 @@ export type string_model_name = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-3
 /**
  * Semantic helper
  *
- * For example `"A cat wearing a hat"`
+ * For example `"How many eyes does a cat have?"`
  */
 export type string_prompt = string;
+/**
+ * Semantic helper
+ *
+ * For example `"A cat wearing a hat"`
+ */
+export type string_prompt_image = string;
 /**
  * Semantic helper
  *
@@ -140,6 +146,8 @@ export type string_title = string;
  * Semantic helper
  *
  * For example `"My AI Assistant"`
+ *
+ * TODO: !!!! Brand the type
  */
 export type string_agent_name = string;
 /**
@@ -154,6 +162,14 @@ export type string_agent_name_in_book = string;
  * For example `"b126926439c5fcb83609888a11283723c1ef137c0ad599a77a1be81812bd221d"`
  */
 export type string_agent_hash = string_sha256;
+/**
+ * Semantic helper
+ *
+ * For example `"3mJr7AoUXx2Wqd"`
+ *
+ * TODO: !!!! Brand the type
+ */
+export type string_agent_permanent_id = string_base_58;
 /**
  * Unstructured description of the persona
  *
@@ -499,6 +515,12 @@ export type string_user_id = id | string_email;
  * For example `"b126926439c5fcb83609888a11283723c1ef137c0ad599a77a1be81812bd221d"`
  */
 export type string_sha256 = string;
+/**
+ * Semantic helper
+ *
+ * For example `"4JmF3b2J5dGVz"`
+ */
+export type string_base_58 = string;
 /**
  * Semantic helper
  *
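The new aliases are plain (not yet branded) strings used for documentation of intent; a small sketch reusing the examples from the docblocks above (import path assumed):

    import type { string_prompt, string_prompt_image } from '@promptbook/types'; // assumed path

    const textPrompt: string_prompt = 'How many eyes does a cat have?';
    const imagePrompt: string_prompt_image = 'A cat wearing a hat';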
package/esm/typings/src/utils/color/utils/colorToDataUrl.d.ts CHANGED
@@ -1,10 +1,11 @@
+import { string_color, string_data_url, string_url_image } from '../../../types/typeAliases';
 import { Color } from '../Color';
 /**
  * Makes data url from color
  *
  * @public exported from `@promptbook/color`
  */
-export declare function colorToDataUrl(color: Color): string;
+export declare function colorToDataUrl(color: Color | string_color): string_data_url & string_url_image;
 /**
  * TODO: Make as functions NOT const
  */
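The widened signature accepts a plain color string as well as a `Color` instance and returns a typed data URL. A hypothetical usage sketch (the string color format shown is an assumption):

    import { colorToDataUrl } from '@promptbook/color';

    // A string color is now accepted directly (previously only a `Color` instance)
    const placeholderImage = colorToDataUrl('#ff8800');
    // The result is typed as a data URL / image URL, usable e.g. as an <img> src placeholder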
package/esm/typings/src/utils/environment/$detectRuntimeEnvironment.d.ts CHANGED
@@ -6,10 +6,10 @@
  * @public exported from `@promptbook/utils`
  */
 export declare function $detectRuntimeEnvironment(): {
-    isRunningInBrowser: any;
-    isRunningInJest: any;
-    isRunningInNode: any;
-    isRunningInWebWorker: any;
+    isRunningInBrowser: boolean;
+    isRunningInJest: boolean;
+    isRunningInNode: boolean;
+    isRunningInWebWorker: boolean;
 };
 /**
  * TODO: [🎺] Also detect and report node version here
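With the result fields now typed as booleans instead of `any`, the flags can be used directly in conditions:

    import { $detectRuntimeEnvironment } from '@promptbook/utils';

    const { isRunningInNode, isRunningInWebWorker } = $detectRuntimeEnvironment();
    if (isRunningInNode && !isRunningInWebWorker) {
        // e.g. safe to touch the filesystem here
    }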
package/esm/typings/src/utils/environment/$isRunningInBrowser.d.ts CHANGED
@@ -5,7 +5,7 @@
  *
  * @public exported from `@promptbook/utils`
  */
-export declare const $isRunningInBrowser: Function;
+export declare function $isRunningInBrowser(): boolean;
 /**
  * TODO: [🎺]
  */
package/esm/typings/src/utils/environment/$isRunningInJest.d.ts CHANGED
@@ -5,7 +5,7 @@
  *
  * @public exported from `@promptbook/utils`
  */
-export declare const $isRunningInJest: Function;
+export declare function $isRunningInJest(): boolean;
 /**
  * TODO: [🎺]
  */
package/esm/typings/src/utils/environment/$isRunningInNode.d.ts CHANGED
@@ -5,7 +5,7 @@
  *
  * @public exported from `@promptbook/utils`
  */
-export declare const $isRunningInNode: Function;
+export declare function $isRunningInNode(): boolean;
 /**
  * TODO: [🎺]
  */
package/esm/typings/src/utils/environment/$isRunningInWebWorker.d.ts CHANGED
@@ -5,7 +5,7 @@
  *
  * @public exported from `@promptbook/utils`
  */
-export declare const $isRunningInWebWorker: Function;
+export declare function $isRunningInWebWorker(): boolean;
 /**
  * TODO: [🎺]
  */
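These helpers changed from `Function`-typed constants (previously built with `new Function(...)`, which strict Content-Security-Policy setups can block) to ordinary functions returning booleans, so call sites need parentheses, as the UMD diff further below also shows:

    import { $isRunningInBrowser, $isRunningInNode } from '@promptbook/utils';

    // Before: `if ($isRunningInNode) { ... }` was always truthy (a Function object)
    // Now the predicate must be invoked:
    if ($isRunningInNode()) {
        // Node-only code path
    }
    if (!$isRunningInBrowser()) {
        // skip DOM access
    }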
package/esm/typings/src/utils/markdown/extractAllBlocksFromMarkdown.d.ts CHANGED
@@ -2,7 +2,7 @@ import type { string_markdown } from '../../types/typeAliases';
 /**
  * Single code block inside markdown.
  */
-export type CodeBlock = {
+export type MarkdownCodeBlock = {
    /**
     * Which notation was used to open the code block
     */
@@ -30,7 +30,7 @@ export type CodeBlock = {
  * @throws {ParseError} if block is not closed properly
  * @public exported from `@promptbook/markdown-utils`
  */
-export declare function extractAllBlocksFromMarkdown(markdown: string_markdown): ReadonlyArray<CodeBlock>;
+export declare function extractAllBlocksFromMarkdown(markdown: string_markdown): ReadonlyArray<MarkdownCodeBlock>;
 /**
  * TODO: Maybe name for `blockNotation` instead of '```' and '>'
  */
package/esm/typings/src/utils/markdown/extractOneBlockFromMarkdown.d.ts CHANGED
@@ -1,5 +1,5 @@
 import type { string_markdown } from '../../types/typeAliases';
-import type { CodeBlock } from './extractAllBlocksFromMarkdown';
+import type { MarkdownCodeBlock } from './extractAllBlocksFromMarkdown';
 /**
  * Extracts exactly ONE code block from markdown.
  *
@@ -16,7 +16,7 @@ import type { CodeBlock } from './extractAllBlocksFromMarkdown';
  * @public exported from `@promptbook/markdown-utils`
  * @throws {ParseError} if there is not exactly one code block in the markdown
  */
-export declare function extractOneBlockFromMarkdown(markdown: string_markdown): CodeBlock;
+export declare function extractOneBlockFromMarkdown(markdown: string_markdown): MarkdownCodeBlock;
 /***
  * TODO: [🍓][🌻] Decide of this is internal utility, external util OR validator/postprocessor
  */
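Code that imported the old `CodeBlock` type name needs updating to `MarkdownCodeBlock`. A hypothetical usage sketch (the type re-export from `@promptbook/markdown-utils` is assumed):

    import { extractOneBlockFromMarkdown } from '@promptbook/markdown-utils';
    import type { MarkdownCodeBlock } from '@promptbook/markdown-utils'; // assumed re-export

    const markdown = ['Intro text', '', '```json', '{ "answer": 42 }', '```'].join('\n');
    const block: MarkdownCodeBlock = extractOneBlockFromMarkdown(markdown);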
package/esm/typings/src/utils/random/$randomBase58.d.ts CHANGED
@@ -0,0 +1,12 @@
+/**
+ * Generates random base58 string
+ *
+ * Note: `$` is used to indicate that this function is not a pure function - it is not deterministic
+ * Note: This function is cryptographically secure (it uses crypto.randomBytes internally)
+ *
+ * @param length - length of the string
+ * @returns secure random base58 string
+ *
+ * @private internal helper function
+ */
+export declare function $randomBase58(length: number): string;
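The declaration notes that the implementation uses `crypto.randomBytes`. A minimal standalone sketch of a generator along those lines (not the package's actual implementation; it keeps a simple modulo mapping for brevity):

    import { randomBytes } from 'crypto';

    // Base58 alphabet: no 0, O, I or l, to avoid look-alike characters
    const BASE58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz';

    function randomBase58Sketch(length: number): string {
        const bytes = randomBytes(length);
        let result = '';
        for (const byte of bytes) {
            result += BASE58[byte % BASE58.length]; // simple mapping; slight modulo bias ignored here
        }
        return result;
    }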
package/esm/typings/src/version.d.ts CHANGED
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.104.0-0`).
+ * It follows semantic versioning (e.g., `0.104.0-9`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/node",
-    "version": "0.104.0-1",
+    "version": "0.104.0-10",
     "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
     "private": false,
     "sideEffects": false,
@@ -93,7 +93,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/node.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.104.0-1"
+        "@promptbook/core": "0.104.0-10"
     },
     "dependencies": {
         "colors": "1.4.0",
package/umd/index.umd.js CHANGED
@@ -45,7 +45,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0-1';
+const PROMPTBOOK_ENGINE_VERSION = '0.104.0-10';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4056,6 +4056,12 @@
    callEmbeddingModel(prompt) {
        return this.callCommonModel(prompt);
    }
+    /**
+     * Calls the best available embedding model
+     */
+    callImageGenerationModel(prompt) {
+        return this.callCommonModel(prompt);
+    }
    // <- Note: [🤖]
    /**
     * Calls the best available model
@@ -4082,6 +4088,11 @@
                    continue llm;
                }
                return await llmExecutionTools.callEmbeddingModel(prompt);
+            case 'IMAGE_GENERATION':
+                if (llmExecutionTools.callImageGenerationModel === undefined) {
+                    continue llm;
+                }
+                return await llmExecutionTools.callImageGenerationModel(prompt);
            // <- case [🤖]:
            default:
                throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
@@ -4985,8 +4996,9 @@
                $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
                break variant;
            case 'EMBEDDING':
+            case 'IMAGE_GENERATION':
                throw new PipelineExecutionError(spaceTrim$1.spaceTrim((block) => `
-                    Embedding model can not be used in pipeline
+                    ${modelRequirements.modelVariant} model can not be used in pipeline

                    This should be catched during parsing

@@ -6211,6 +6223,15 @@
            return promptResult;
        };
    }
+    if (llmTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            // console.info('[🚕] callImageGenerationModel through countTotalUsage');
+            const promptResult = await llmTools.callImageGenerationModel(prompt);
+            totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
+            return promptResult;
+        };
+    }
    // <- Note: [🤖]
    return proxyTools;
 }
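The same wrap-and-accumulate pattern used for the other model methods, extended here to image generation. A standalone sketch of the idea (types simplified and hypothetical, not the package's `countTotalUsage` API):

    type Usage = { price: number }; // simplified stand-in for the package's usage shape

    type ImageTools = {
        callImageGenerationModel?: (prompt: unknown) => Promise<{ usage: Usage }>;
    };

    function withUsageCounting(tools: ImageTools, onUsage: (usage: Usage) => void): ImageTools {
        const proxy: ImageTools = { ...tools };
        if (tools.callImageGenerationModel !== undefined) {
            proxy.callImageGenerationModel = async (prompt) => {
                const result = await tools.callImageGenerationModel!(prompt);
                onUsage(result.usage); // accumulate spending, as the wrapped tools above do
                return result;
            };
        }
        return proxy;
    }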
@@ -8942,11 +8963,7 @@
            // TODO: [🚜] DRY
            if ($taskJson.modelRequirements[command.key] !== undefined) {
                if ($taskJson.modelRequirements[command.key] === command.value) {
-                    console.warn(`Multiple commands \`MODEL ${{
-                        modelName: 'NAME',
-                        modelVariant: 'VARIANT',
-                        maxTokens: '???',
-                    }[command.key]} ${command.value}\` in the task "${$taskJson.title || $taskJson.name}"`);
+                    console.warn(`Multiple commands \`MODEL ${command.key} ${command.value}\` in the task "${$taskJson.title || $taskJson.name}"`);
                    // <- TODO: [🏮] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
                }
                else {
@@ -10828,13 +10845,14 @@
 *
 * @public exported from `@promptbook/utils`
 */
-const $isRunningInNode = new Function(`
-    try {
-        return this === global;
-    } catch (e) {
-        return false;
+function $isRunningInNode() {
+    try {
+        return typeof process !== 'undefined' && process.versions != null && process.versions.node != null;
+    }
+    catch (e) {
+        return false;
+    }
 }
-    `);
 /**
  * TODO: [🎺]
  */
@@ -11395,7 +11413,7 @@
                    ${i + 1}) **${title}** \`${className}\` from \`${packageName}\`
                    ${morePieces.join('; ')}
                `);
-                if ($isRunningInNode) {
+                if ($isRunningInNode()) {
                    if (isInstalled && isFullyConfigured) {
                        providerMessage = colors__default["default"].green(providerMessage);
                    }
11499
11517
  *
11500
11518
  * @public exported from `@promptbook/utils`
11501
11519
  */
11502
- const $isRunningInBrowser = new Function(`
11503
- try {
11504
- return this === window;
11505
- } catch (e) {
11506
- return false;
11520
+ function $isRunningInBrowser() {
11521
+ try {
11522
+ return typeof window !== 'undefined' && typeof window.document !== 'undefined';
11523
+ }
11524
+ catch (e) {
11525
+ return false;
11526
+ }
11507
11527
  }
11508
- `);
11509
11528
  /**
11510
11529
  * TODO: [🎺]
11511
11530
  */
@@ -11517,17 +11536,17 @@
 *
 * @public exported from `@promptbook/utils`
 */
-const $isRunningInWebWorker = new Function(`
-    try {
-        if (typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope) {
-            return true;
-        } else {
+function $isRunningInWebWorker() {
+    try {
+        // Note: Check for importScripts which is specific to workers
+        // and not available in the main browser thread
+        return (typeof self !== 'undefined' &&
+            typeof self.importScripts === 'function');
+    }
+    catch (e) {
        return false;
    }
-    } catch (e) {
-        return false;
 }
-    `);
 /**
  * TODO: [🎺]
  */
@@ -11682,24 +11701,6 @@
 * Note: [🟢] Code in this file should never be never released in packages that could be imported into browser environment
 */

-/**
- * Detects if the code is running in jest environment
- *
- * Note: `$` is used to indicate that this function is not a pure function - it looks at the global object to determine the environment
- *
- * @public exported from `@promptbook/utils`
- */
-new Function(`
-    try {
-        return process.env.JEST_WORKER_ID !== undefined;
-    } catch (e) {
-        return false;
-    }
-    `);
-/**
- * TODO: [🎺]
- */
-
 /**
  * Makes first letter of a string lowercase
  *