@promptbook/wizard 0.104.0-6 → 0.104.0-8

This diff shows the content of publicly released package versions as published to their respective registries. It is provided for informational purposes only and reflects the changes between those versions.
@@ -22,6 +22,14 @@ type ServerConfiguration = {
   */
  urls: Array<string_promptbook_server_url>;
  };
+ /**
+ * Core Promptbook server configuration.
+ *
+ * This server is also used for auto-federation in the Agents Server.
+ *
+ * @public exported from `@promptbook/core`
+ */
+ export declare const CORE_SERVER: ServerConfiguration;
  /**
  * Available remote servers for the Promptbook
  *
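
The hunk above introduces the new `CORE_SERVER` export. A minimal usage sketch, assuming the constant is imported from the public `@promptbook/core` entry point as its JSDoc states:

    import { CORE_SERVER } from '@promptbook/core';

    // `CORE_SERVER` is a ServerConfiguration; per the fragment above it exposes
    // a `urls` array of Promptbook server URLs (other fields are not shown in this diff)
    for (const url of CORE_SERVER.urls) {
        console.info(url);
    }
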
@@ -191,6 +191,7 @@ import { SectionTypes } from '../types/SectionType';
  import { TaskTypes } from '../types/TaskType';
  import { aboutPromptbookInformation } from '../utils/misc/aboutPromptbookInformation';
  import { $generateBookBoilerplate } from '../utils/random/$generateBookBoilerplate';
+ import { CORE_SERVER } from '../../servers';
  import { REMOTE_SERVER_URLS } from '../../servers';
  import { AUTO_FEDERATED_AGENT_SERVER_URLS } from '../../servers';
  export { BOOK_LANGUAGE_VERSION, PROMPTBOOK_ENGINE_VERSION };
@@ -386,5 +387,6 @@ export { SectionTypes };
  export { TaskTypes };
  export { aboutPromptbookInformation };
  export { $generateBookBoilerplate };
+ export { CORE_SERVER };
  export { REMOTE_SERVER_URLS };
  export { AUTO_FEDERATED_AGENT_SERVER_URLS };
@@ -206,6 +206,7 @@ import type { string_char_emoji } from '../types/typeAliasEmoji';
  import type { string_business_category_name } from '../types/typeAliases';
  import type { string_model_name } from '../types/typeAliases';
  import type { string_prompt } from '../types/typeAliases';
+ import type { string_prompt_image } from '../types/typeAliases';
  import type { string_template } from '../types/typeAliases';
  import type { string_text_prompt } from '../types/typeAliases';
  import type { string_chat_prompt } from '../types/typeAliases';
@@ -573,6 +574,7 @@ export type { string_char_emoji };
  export type { string_business_category_name };
  export type { string_model_name };
  export type { string_prompt };
+ export type { string_prompt_image };
  export type { string_template };
  export type { string_text_prompt };
  export type { string_chat_prompt };
@@ -1,4 +1,4 @@
- import type { string_agent_name, string_agent_permanent_id, string_url_image } from '../../types/typeAliases';
+ import type { string_agent_name, string_agent_permanent_id, string_url, string_url_image } from '../../types/typeAliases';
  /**
  * Generates an image for the agent to use as profile image
  *
@@ -7,7 +7,7 @@ import type { string_agent_name, string_agent_permanent_id, string_url_image } f
  *
  * @public exported from `@promptbook/core`
  */
- export declare function generatePlaceholderAgentProfileImageUrl(agentIdOrName: string_agent_permanent_id | string_agent_name): string_url_image;
+ export declare function generatePlaceholderAgentProfileImageUrl(agentIdOrName: string_agent_permanent_id | string_agent_name, agentsServerUrl?: URL | string_url): string_url_image;
  /**
  * TODO: [🤹] Figure out best placeholder image generator https://i.pravatar.cc/1000?u=568
  */
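
A minimal usage sketch of the widened `generatePlaceholderAgentProfileImageUrl` signature; the agent name and server URL below are illustrative, not values taken from the package:

    import { generatePlaceholderAgentProfileImageUrl } from '@promptbook/core';

    // Same call as before this release
    const defaultUrl = generatePlaceholderAgentProfileImageUrl('joker');

    // New optional second argument: target a specific Agents Server
    const federatedUrl = generatePlaceholderAgentProfileImageUrl(
        'joker',
        new URL('https://agents.example.com/'), // illustrative; accepts URL | string_url
    );
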
@@ -17,7 +17,17 @@ export type CompletionModelRequirements = CommonModelRequirements & {
  /**
  * Completion model variant
  */
- modelVariant: 'COMPLETION';
+ readonly modelVariant: 'COMPLETION';
+ /**
+ * The temperature of the model
+ *
+ * Note: [💱] Promptbook is using just `temperature` (not `top_k` and `top_p`)
+ */
+ readonly temperature?: number_model_temperature;
+ /**
+ * Maximum number of tokens that can be generated by the model
+ */
+ readonly maxTokens?: number;
  };
  /**
  * Model requirements for the chat variant
@@ -28,11 +38,21 @@ export type ChatModelRequirements = CommonModelRequirements & {
  /**
  * Chat model variant
  */
- modelVariant: 'CHAT';
+ readonly modelVariant: 'CHAT';
  /**
  * System message to be used in the model
  */
  readonly systemMessage?: string_system_message;
+ /**
+ * The temperature of the model
+ *
+ * Note: [💱] Promptbook is using just `temperature` (not `top_k` and `top_p`)
+ */
+ readonly temperature?: number_model_temperature;
+ /**
+ * Maximum number of tokens that can be generated by the model
+ */
+ readonly maxTokens?: number;
  };
  /**
  * Model requirements for the image generation variant
@@ -43,7 +63,21 @@ export type ImageGenerationModelRequirements = CommonModelRequirements & {
  /**
  * Image generation model variant
  */
- modelVariant: 'IMAGE_GENERATION';
+ readonly modelVariant: 'IMAGE_GENERATION';
+ /**
+ * Size of the generated image
+ *
+ * e.g. '1536x1536'
+ */
+ readonly size?: '1024x1024' | '1792x1024' | '1024x1792' | `${number}x${number}`;
+ /**
+ * Quality of the generated image
+ */
+ readonly quality?: 'standard' | 'hd';
+ /**
+ * Style of the generated image
+ */
+ readonly style?: 'vivid' | 'natural';
  };
  /**
  * Model requirements for the embedding variant
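
A minimal sketch of an `ImageGenerationModelRequirements` object under the new shape; the import path is assumed to be the public typings entry, not the internal `ModelRequirements.d.ts` module shown here:

    import type { ImageGenerationModelRequirements } from '@promptbook/types'; // path assumed

    const imageRequirements: ImageGenerationModelRequirements = {
        modelVariant: 'IMAGE_GENERATION',
        size: '1024x1792',   // one of the listed presets or any `${number}x${number}`
        quality: 'hd',       // 'standard' | 'hd'
        style: 'natural',    // 'vivid' | 'natural'
    };
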
@@ -54,7 +88,7 @@ export type EmbeddingModelRequirements = CommonModelRequirements & {
  /**
  * Embedding model variant
  */
- modelVariant: 'EMBEDDING';
+ readonly modelVariant: 'EMBEDDING';
  };
  /**
  * Common properties for all model requirements variants
@@ -84,20 +118,10 @@ export type CommonModelRequirements = {
  * @example 'gpt-4', 'gpt-4-32k-0314', 'gpt-3.5-turbo-instruct',...
  */
  readonly modelName?: string_model_name;
- /**
- * The temperature of the model
- *
- * Note: [💱] Promptbook is using just `temperature` (not `top_k` and `top_p`)
- */
- readonly temperature?: number_model_temperature;
  /**
  * Seed for the model
  */
  readonly seed?: number_seed;
- /**
- * Maximum number of tokens that can be generated by the model
- */
- readonly maxTokens?: number;
  };
  /**
  * TODO: [🧠][๐Ÿˆ] `seed` should maybe be somewhere else (not in `ModelRequirements`) (similar that `user` identification is not here)
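
Taken together with the Completion and Chat hunks above, the removal here moves `temperature` and `maxTokens` from `CommonModelRequirements` onto the variant-specific requirement types. A minimal sketch under that reading (import path assumed):

    import type { ChatModelRequirements } from '@promptbook/types'; // path assumed

    const chatRequirements: ChatModelRequirements = {
        modelVariant: 'CHAT',
        systemMessage: 'You are a helpful assistant.',
        temperature: 0.7, // now declared per-variant rather than on CommonModelRequirements
        maxTokens: 1024,
    };
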
@@ -14,9 +14,15 @@ export type string_model_name = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-3
  /**
  * Semantic helper
  *
- * For example `"A cat wearing a hat"`
+ * For example `"How many eyes does a cat have?"`
  */
  export type string_prompt = string;
+ /**
+ * Semantic helper
+ *
+ * For example `"A cat wearing a hat"`
+ */
+ export type string_prompt_image = string;
  /**
  * Semantic helper
  *
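
A minimal sketch of the two semantic aliases side by side; both compile to plain `string`, and the distinction is documentation intent (import path assumed):

    import type { string_prompt, string_prompt_image } from '@promptbook/types'; // path assumed

    const textPrompt: string_prompt = 'How many eyes does a cat have?';
    const imagePrompt: string_prompt_image = 'A cat wearing a hat';
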
@@ -1,10 +1,11 @@
+ import { string_color, string_data_url, string_url_image } from '../../../types/typeAliases';
  import { Color } from '../Color';
  /**
  * Makes data url from color
  *
  * @public exported from `@promptbook/color`
  */
- export declare function colorToDataUrl(color: Color): string;
+ export declare function colorToDataUrl(color: Color | string_color): string_data_url & string_url_image;
  /**
  * TODO: Make as functions NOT const
  */
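
A minimal usage sketch of the widened `colorToDataUrl` signature; whether a hex string is a valid `string_color` at runtime is an assumption, since the alias is a plain string:

    import { colorToDataUrl } from '@promptbook/color';

    // Previously only a `Color` instance was accepted; a color string now type-checks too
    const imageUrl = colorToDataUrl('#3366ff'); // hex value is illustrative

    // The return type is now a data URL usable wherever string_url_image is expected
    console.info(imageUrl.startsWith('data:'));
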
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.104.0-5`).
+ * It follows semantic versioning (e.g., `0.104.0-7`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/wizard",
- "version": "0.104.0-6",
+ "version": "0.104.0-8",
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
  "private": false,
  "sideEffects": false,
@@ -95,7 +95,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/wizard.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.104.0-6"
+ "@promptbook/core": "0.104.0-8"
  },
  "dependencies": {
  "@ai-sdk/deepseek": "0.1.17",
package/umd/index.umd.js CHANGED
@@ -48,7 +48,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-6';
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-8';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -58,6 +58,8 @@
  * Core Promptbook server configuration.
  *
  * This server is also used for auto-federation in the Agents Server.
+ *
+ * @public exported from `@promptbook/core`
  */
  const CORE_SERVER = {
  title: 'Promptbook Core',
@@ -6254,13 +6256,14 @@
  const modelName = currentModelRequirements.modelName || this.getDefaultImageGenerationModel().modelName;
  const modelSettings = {
  model: modelName,
- // size: currentModelRequirements.size,
- // quality: currentModelRequirements.quality,
- // style: currentModelRequirements.style,
+ size: currentModelRequirements.size,
+ quality: currentModelRequirements.quality,
+ style: currentModelRequirements.style,
  };
  const rawPromptContent = templateParameters(content, { ...parameters, modelName });
  const rawRequest = {
  ...modelSettings,
+ size: modelSettings.size || '1024x1024',
  prompt: rawPromptContent,
  user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
  response_format: 'url', // TODO: [🧠] Maybe allow b64_json
@@ -21326,11 +21329,7 @@
  // TODO: [🚜] DRY
  if ($taskJson.modelRequirements[command.key] !== undefined) {
  if ($taskJson.modelRequirements[command.key] === command.value) {
- console.warn(`Multiple commands \`MODEL ${{
- modelName: 'NAME',
- modelVariant: 'VARIANT',
- maxTokens: '???',
- }[command.key]} ${command.value}\` in the task "${$taskJson.title || $taskJson.name}"`);
+ console.warn(`Multiple commands \`MODEL ${command.key} ${command.value}\` in the task "${$taskJson.title || $taskJson.name}"`);
  // <- TODO: [๐Ÿฎ] Some standard way how to transform errors into warnings and how to handle non-critical fails during the tasks
  }
  else {