@promptbook/ollama 0.104.0-6 → 0.104.0-8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -17,7 +17,17 @@ export type CompletionModelRequirements = CommonModelRequirements & {
     /**
      * Completion model variant
      */
-    modelVariant: 'COMPLETION';
+    readonly modelVariant: 'COMPLETION';
+    /**
+     * The temperature of the model
+     *
+     * Note: [💱] Promptbook is using just `temperature` (not `top_k` and `top_p`)
+     */
+    readonly temperature?: number_model_temperature;
+    /**
+     * Maximum number of tokens that can be generated by the model
+     */
+    readonly maxTokens?: number;
 };
 /**
  * Model requirements for the chat variant
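The hunk above marks `modelVariant` as `readonly` and moves `temperature` and `maxTokens` onto the completion variant. A minimal usage sketch, assuming `number_model_temperature` is a plain alias for `number` (consistent with the `string_prompt = string` aliases elsewhere in this package); the object literal and values are illustrative, not from the package:

```ts
// Hypothetical usage sketch against the type declared above.
const completionRequirements: CompletionModelRequirements = {
    modelVariant: 'COMPLETION',
    modelName: 'gpt-3.5-turbo-instruct',
    temperature: 0.2, // now declared per variant, not on CommonModelRequirements
    maxTokens: 256,
};

// The new `readonly` modifiers make post-construction mutation a compile-time error:
// completionRequirements.maxTokens = 512; // ✖ Cannot assign to 'maxTokens' because it is a read-only property.
```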
@@ -28,11 +38,21 @@ export type ChatModelRequirements = CommonModelRequirements & {
     /**
      * Chat model variant
      */
-    modelVariant: 'CHAT';
+    readonly modelVariant: 'CHAT';
     /**
      * System message to be used in the model
      */
     readonly systemMessage?: string_system_message;
+    /**
+     * The temperature of the model
+     *
+     * Note: [💱] Promptbook is using just `temperature` (not `top_k` and `top_p`)
+     */
+    readonly temperature?: number_model_temperature;
+    /**
+     * Maximum number of tokens that can be generated by the model
+     */
+    readonly maxTokens?: number;
 };
 /**
  * Model requirements for the image generation variant
@@ -43,7 +63,21 @@ export type ImageGenerationModelRequirements = CommonModelRequirements & {
     /**
      * Image generation model variant
      */
-    modelVariant: 'IMAGE_GENERATION';
+    readonly modelVariant: 'IMAGE_GENERATION';
+    /**
+     * Size of the generated image
+     *
+     * e.g. '1536x1536'
+     */
+    readonly size?: '1024x1024' | '1792x1024' | '1024x1792' | `${number}x${number}`;
+    /**
+     * Quality of the generated image
+     */
+    readonly quality?: 'standard' | 'hd';
+    /**
+     * Style of the generated image
+     */
+    readonly style?: 'vivid' | 'natural';
 };
 /**
  * Model requirements for the embedding variant
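The image-generation variant gains typed `size`, `quality`, and `style` options; the `${number}x${number}` template-literal member admits arbitrary dimensions beyond the three presets. A hypothetical usage sketch (values are illustrative):

```ts
// Hypothetical usage sketch for the new image-generation fields.
const imageRequirements: ImageGenerationModelRequirements = {
    modelVariant: 'IMAGE_GENERATION',
    size: '1792x1024', // one of the listed presets…
    quality: 'hd',
    style: 'natural',
};

// …or any `${number}x${number}` string, per the template-literal member:
const customSize: ImageGenerationModelRequirements['size'] = '1536x1536';
```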
@@ -54,7 +88,7 @@ export type EmbeddingModelRequirements = CommonModelRequirements & {
     /**
      * Embedding model variant
      */
-    modelVariant: 'EMBEDDING';
+    readonly modelVariant: 'EMBEDDING';
 };
 /**
  * Common properties for all model requirements variants
@@ -84,20 +118,10 @@ export type CommonModelRequirements = {
      * @example 'gpt-4', 'gpt-4-32k-0314', 'gpt-3.5-turbo-instruct',...
      */
     readonly modelName?: string_model_name;
-    /**
-     * The temperature of the model
-     *
-     * Note: [💱] Promptbook is using just `temperature` (not `top_k` and `top_p`)
-     */
-    readonly temperature?: number_model_temperature;
     /**
      * Seed for the model
      */
     readonly seed?: number_seed;
-    /**
-     * Maximum number of tokens that can be generated by the model
-     */
-    readonly maxTokens?: number;
 };
 /**
  * TODO: [🧠][🈁] `seed` should maybe be somewhere else (not in `ModelRequirements`) (similar that `user` identification is not here)
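Together with the earlier hunks, this completes the move: `temperature` and `maxTokens` now live only on the `COMPLETION` and `CHAT` variants, so variants where they are meaningless reject them at compile time. A hypothetical illustration:

```ts
// Hypothetical illustration of the narrowed surface.
const embedding: EmbeddingModelRequirements = {
    modelVariant: 'EMBEDDING',
    // temperature: 0.7, // ✖ now a type error — the field no longer exists on CommonModelRequirements
};
```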
@@ -14,9 +14,15 @@ export type string_model_name = 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-3
 /**
  * Semantic helper
  *
- * For example `"A cat wearing a hat"`
+ * For example `"How many eyes does a cat have?"`
  */
 export type string_prompt = string;
+/**
+ * Semantic helper
+ *
+ * For example `"A cat wearing a hat"`
+ */
+export type string_prompt_image = string;
 /**
  * Semantic helper
  *
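The prompt examples were split so each alias gets a fitting illustration: the new `string_prompt_image` takes over the image-style example, while `string_prompt` gets a text-style one. Both remain plain strings that carry intent, not runtime checks; a sketch:

```ts
// Both aliases are plain `string`s; the distinction is documentary.
const textPrompt: string_prompt = 'How many eyes does a cat have?';
const imagePrompt: string_prompt_image = 'A cat wearing a hat';
```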
@@ -1,10 +1,11 @@
+import { string_color, string_data_url, string_url_image } from '../../../types/typeAliases';
 import { Color } from '../Color';
 /**
  * Makes data url from color
  *
  * @public exported from `@promptbook/color`
  */
-export declare function colorToDataUrl(color: Color): string;
+export declare function colorToDataUrl(color: Color | string_color): string_data_url & string_url_image;
 /**
  * TODO: Make as functions NOT const
  */
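`colorToDataUrl` now also accepts a raw color string and brands its return value as a data-URL image. A hypothetical call, assuming `string_color`, `string_data_url`, and `string_url_image` are plain `string` aliases like the other semantic helpers in the package:

```ts
// Hypothetical call — the input value is illustrative.
const dataUrl = colorToDataUrl('#ff0000'); // a color string is now accepted directly
// `dataUrl` is typed as `string_data_url & string_url_image`,
// i.e. something like a 'data:image/…' URL.
```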
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
 export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
 /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.104.0-5`).
+ * It follows semantic versioning (e.g., `0.104.0-7`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
     "name": "@promptbook/ollama",
-    "version": "0.104.0-6",
+    "version": "0.104.0-8",
     "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
     "private": false,
     "sideEffects": false,
@@ -94,7 +94,7 @@
     "module": "./esm/index.es.js",
     "typings": "./esm/typings/src/_packages/ollama.index.d.ts",
     "peerDependencies": {
-        "@promptbook/core": "0.104.0-6"
+        "@promptbook/core": "0.104.0-8"
     },
     "dependencies": {
         "bottleneck": "2.19.5",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0-6';
+const PROMPTBOOK_ENGINE_VERSION = '0.104.0-8';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3453,13 +3453,14 @@
         const modelName = currentModelRequirements.modelName || this.getDefaultImageGenerationModel().modelName;
         const modelSettings = {
             model: modelName,
-            // size: currentModelRequirements.size,
-            // quality: currentModelRequirements.quality,
-            // style: currentModelRequirements.style,
+            size: currentModelRequirements.size,
+            quality: currentModelRequirements.quality,
+            style: currentModelRequirements.style,
         };
         const rawPromptContent = templateParameters(content, { ...parameters, modelName });
         const rawRequest = {
             ...modelSettings,
+            size: modelSettings.size || '1024x1024',
             prompt: rawPromptContent,
             user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
             response_format: 'url', // TODO: [🧠] Maybe allow b64_json
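The previously commented-out `size`, `quality`, and `style` are now forwarded to the request, and `size` falls back to `'1024x1024'` when the requirements leave it unset. Note that `||` also replaces other falsy values such as an empty string; a nullish-coalescing form would fall back only on `null`/`undefined`. The fallback isolated as a sketch:

```ts
// Sketch of the fallback pattern, not the shipped code verbatim.
const size = currentModelRequirements.size || '1024x1024';       // as shipped: any falsy value falls back
const sizeStrict = currentModelRequirements.size ?? '1024x1024'; // stricter alternative: only null/undefined fall back
```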