@llumiverse/core 0.15.0 → 0.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. package/README.md +3 -3
  2. package/lib/cjs/CompletionStream.js +31 -10
  3. package/lib/cjs/CompletionStream.js.map +1 -1
  4. package/lib/cjs/Driver.js +19 -21
  5. package/lib/cjs/Driver.js.map +1 -1
  6. package/lib/cjs/async.js +3 -2
  7. package/lib/cjs/async.js.map +1 -1
  8. package/lib/cjs/formatters/index.js +1 -3
  9. package/lib/cjs/formatters/index.js.map +1 -1
  10. package/lib/cjs/formatters/{claude.js → nova.js} +33 -37
  11. package/lib/cjs/formatters/nova.js.map +1 -0
  12. package/lib/cjs/formatters/openai.js +36 -6
  13. package/lib/cjs/formatters/openai.js.map +1 -1
  14. package/lib/cjs/index.js +1 -0
  15. package/lib/cjs/index.js.map +1 -1
  16. package/lib/cjs/options/bedrock.js +343 -0
  17. package/lib/cjs/options/bedrock.js.map +1 -0
  18. package/lib/cjs/options/groq.js +37 -0
  19. package/lib/cjs/options/groq.js.map +1 -0
  20. package/lib/cjs/options/openai.js +123 -0
  21. package/lib/cjs/options/openai.js.map +1 -0
  22. package/lib/cjs/options/vertexai.js +257 -0
  23. package/lib/cjs/options/vertexai.js.map +1 -0
  24. package/lib/cjs/options.js +54 -0
  25. package/lib/cjs/options.js.map +1 -0
  26. package/lib/cjs/types.js +34 -1
  27. package/lib/cjs/types.js.map +1 -1
  28. package/lib/esm/CompletionStream.js +31 -10
  29. package/lib/esm/CompletionStream.js.map +1 -1
  30. package/lib/esm/Driver.js +20 -22
  31. package/lib/esm/Driver.js.map +1 -1
  32. package/lib/esm/async.js +3 -2
  33. package/lib/esm/async.js.map +1 -1
  34. package/lib/esm/formatters/index.js +1 -3
  35. package/lib/esm/formatters/index.js.map +1 -1
  36. package/lib/esm/formatters/{claude.js → nova.js} +32 -36
  37. package/lib/esm/formatters/nova.js.map +1 -0
  38. package/lib/esm/formatters/openai.js +36 -6
  39. package/lib/esm/formatters/openai.js.map +1 -1
  40. package/lib/esm/index.js +1 -0
  41. package/lib/esm/index.js.map +1 -1
  42. package/lib/esm/options/bedrock.js +340 -0
  43. package/lib/esm/options/bedrock.js.map +1 -0
  44. package/lib/esm/options/groq.js +34 -0
  45. package/lib/esm/options/groq.js.map +1 -0
  46. package/lib/esm/options/openai.js +120 -0
  47. package/lib/esm/options/openai.js.map +1 -0
  48. package/lib/esm/options/vertexai.js +253 -0
  49. package/lib/esm/options/vertexai.js.map +1 -0
  50. package/lib/esm/options.js +50 -0
  51. package/lib/esm/options.js.map +1 -0
  52. package/lib/esm/types.js +33 -0
  53. package/lib/esm/types.js.map +1 -1
  54. package/lib/types/CompletionStream.d.ts +1 -1
  55. package/lib/types/CompletionStream.d.ts.map +1 -1
  56. package/lib/types/Driver.d.ts +5 -4
  57. package/lib/types/Driver.d.ts.map +1 -1
  58. package/lib/types/async.d.ts +3 -2
  59. package/lib/types/async.d.ts.map +1 -1
  60. package/lib/types/formatters/generic.d.ts.map +1 -1
  61. package/lib/types/formatters/index.d.ts +1 -3
  62. package/lib/types/formatters/index.d.ts.map +1 -1
  63. package/lib/types/formatters/nova.d.ts +40 -0
  64. package/lib/types/formatters/nova.d.ts.map +1 -0
  65. package/lib/types/formatters/openai.d.ts +13 -1
  66. package/lib/types/formatters/openai.d.ts.map +1 -1
  67. package/lib/types/index.d.ts +1 -0
  68. package/lib/types/index.d.ts.map +1 -1
  69. package/lib/types/options/bedrock.d.ts +32 -0
  70. package/lib/types/options/bedrock.d.ts.map +1 -0
  71. package/lib/types/options/groq.d.ts +12 -0
  72. package/lib/types/options/groq.d.ts.map +1 -0
  73. package/lib/types/options/openai.d.ts +21 -0
  74. package/lib/types/options/openai.d.ts.map +1 -0
  75. package/lib/types/options/vertexai.d.ts +52 -0
  76. package/lib/types/options/vertexai.d.ts.map +1 -0
  77. package/lib/types/options.d.ts +14 -0
  78. package/lib/types/options.d.ts.map +1 -0
  79. package/lib/types/types.d.ts +133 -49
  80. package/lib/types/types.d.ts.map +1 -1
  81. package/package.json +6 -6
  82. package/src/CompletionStream.ts +31 -11
  83. package/src/Driver.ts +29 -25
  84. package/src/async.ts +7 -5
  85. package/src/formatters/index.ts +1 -3
  86. package/src/formatters/nova.ts +141 -0
  87. package/src/formatters/openai.ts +52 -12
  88. package/src/index.ts +2 -1
  89. package/src/options/bedrock.ts +388 -0
  90. package/src/options/groq.ts +47 -0
  91. package/src/options/openai.ts +148 -0
  92. package/src/options/vertexai.ts +312 -0
  93. package/src/options.ts +62 -0
  94. package/src/types.ts +167 -52
  95. package/lib/cjs/formatters/claude.js.map +0 -1
  96. package/lib/cjs/formatters/llama2.js +0 -48
  97. package/lib/cjs/formatters/llama2.js.map +0 -1
  98. package/lib/cjs/formatters/llama3.js +0 -42
  99. package/lib/cjs/formatters/llama3.js.map +0 -1
  100. package/lib/esm/formatters/claude.js.map +0 -1
  101. package/lib/esm/formatters/llama2.js +0 -45
  102. package/lib/esm/formatters/llama2.js.map +0 -1
  103. package/lib/esm/formatters/llama3.js +0 -39
  104. package/lib/esm/formatters/llama3.js.map +0 -1
  105. package/lib/types/formatters/claude.d.ts +0 -25
  106. package/lib/types/formatters/claude.d.ts.map +0 -1
  107. package/lib/types/formatters/llama2.d.ts +0 -4
  108. package/lib/types/formatters/llama2.d.ts.map +0 -1
  109. package/lib/types/formatters/llama3.d.ts +0 -7
  110. package/lib/types/formatters/llama3.d.ts.map +0 -1
  111. package/src/formatters/claude.ts +0 -131
  112. package/src/formatters/llama2.ts +0 -58
  113. package/src/formatters/llama3.ts +0 -55
package/src/options/vertexai.ts ADDED
@@ -0,0 +1,312 @@
+ import { ModelOptionsInfo, ModelOptionInfoItem, OptionType, SharedOptions, ModelOptions } from "../types.js";
+ import { textOptionsFallback } from "../options.js";
+
+ // Union type of all VertexAI options
+ export type VertexAIOptions = ImagenOptions | VertexAIClaudeOptions;
+
+ export enum ImagenTaskType {
+     TEXT_IMAGE = "TEXT_IMAGE",
+     EDIT_MODE_INPAINT_REMOVAL = "EDIT_MODE_INPAINT_REMOVAL",
+     EDIT_MODE_INPAINT_INSERTION = "EDIT_MODE_INPAINT_INSERTION",
+     EDIT_MODE_BGSWAP = "EDIT_MODE_BGSWAP",
+     EDIT_MODE_OUTPAINT = "EDIT_MODE_OUTPAINT",
+     CUSTOMIZATION_SUBJECT = "CUSTOMIZATION_SUBJECT",
+     CUSTOMIZATION_STYLE = "CUSTOMIZATION_STYLE",
+     CUSTOMIZATION_CONTROLLED = "CUSTOMIZATION_CONTROLLED",
+     CUSTOMIZATION_INSTRUCT = "CUSTOMIZATION_INSTRUCT",
+ }
+
+ export enum ImagenMaskMode {
+     MASK_MODE_USER_PROVIDED = "MASK_MODE_USER_PROVIDED",
+     MASK_MODE_BACKGROUND = "MASK_MODE_BACKGROUND",
+     MASK_MODE_FOREGROUND = "MASK_MODE_FOREGROUND",
+     MASK_MODE_SEMANTIC = "MASK_MODE_SEMANTIC",
+ }
+
+ export interface ImagenOptions {
+     _option_id: "vertexai-imagen"
+
+     //General and generate options
+     number_of_images?: number;
+     seed?: number;
+     person_generation?: "dont_allow" | "allow_adult" | "allow_all";
+     safety_setting?: "block_none" | "block_only_high" | "block_medium_and_above" | "block_low_and_above"; //The "off" option does not seem to work for Imagen 3, might be only for text models
+     image_file_type?: "image/jpeg" | "image/png";
+     jpeg_compression_quality?: number;
+     aspect_ratio?: "1:1" | "4:3" | "3:4" | "16:9" | "9:16";
+     add_watermark?: boolean;
+     enhance_prompt?: boolean;
+
+     //Capability options
+     edit_mode?: ImagenTaskType;
+     guidance_scale?: number;
+     edit_steps?: number;
+     mask_mode?: ImagenMaskMode;
+     mask_dilation?: number;
+     mask_class?: number[];
+
+     //Customization options
+     controlType: "CONTROL_TYPE_FACE_MESH" | "CONTROL_TYPE_CANNY" | "CONTROL_TYPE_SCRIBBLE";
+     controlImageComputation?: boolean;
+     subjectType: "SUBJECT_TYPE_PERSON" | "SUBJECT_TYPE_ANIMAL" | "SUBJECT_TYPE_PRODUCT" | "SUBJECT_TYPE_DEFAULT";
+ }
+
+ export interface VertexAIClaudeOptions {
+     _option_id: "vertexai-claude"
+     max_tokens?: number;
+     temperature?: number;
+     top_p?: number;
+     top_k?: number;
+     stop_sequence?: string[];
+     thinking_mode?: boolean;
+     thinking_budget_tokens?: number;
+ }
+
+ export function getVertexAiOptions(model: string, option?: ModelOptions): ModelOptionsInfo {
+     if (model.includes("imagen-3.0")) {
+         const commonOptions: ModelOptionInfoItem[] = [
+             {
+                 name: SharedOptions.number_of_images, type: OptionType.numeric, min: 1, max: 4, default: 1,
+                 integer: true, description: "Number of Images to generate",
+             },
+             {
+                 name: SharedOptions.seed, type: OptionType.numeric, min: 0, max: 4294967295, default: 12,
+                 integer: true, description: "The seed of the generated image"
+             },
+             {
+                 name: "person_generation", type: OptionType.enum, enum: { "Disallow the inclusion of people or faces in images": "dont_allow", "Allow generation of adults only": "allow_adult", "Allow generation of people of all ages": "allow_all" },
+                 default: "allow_adult", description: "The safety setting for allowing the generation of people in the image"
+             },
+             {
+                 name: "safety_setting", type: OptionType.enum, enum: { "Block very few problematic prompts and responses": "block_none", "Block only few problematic prompts and responses": "block_only_high", "Block some problematic prompts and responses": "block_medium_and_above", "Strictest filtering": "block_low_and_above" },
+                 default: "block_medium_and_above", description: "The overall safety setting"
+             },
+         ];
+
+         const outputOptions: ModelOptionInfoItem[] = [
+             {
+                 name: "image_file_type", type: OptionType.enum, enum: { "JPEG": "image/jpeg", "PNG": "image/png" },
+                 default: "image/png", description: "The file type of the generated image",
+                 refresh: true,
+             },
+         ]
+
+         const jpegQuality: ModelOptionInfoItem = {
+             name: "jpeg_compression_quality", type: OptionType.numeric, min: 0, max: 100, default: 75,
+             integer: true, description: "The compression quality of the JPEG image",
+         }
+
+         if ((option as ImagenOptions)?.image_file_type === "image/jpeg") {
+             outputOptions.push(jpegQuality);
+         }
+         if (model.includes("generate")) {
+             //Generate models
+             const modeOptions: ModelOptionInfoItem[] = [
+                 {
+                     name: "aspect_ratio", type: OptionType.enum, enum: { "1:1": "1:1", "4:3": "4:3", "3:4": "3:4", "16:9": "16:9", "9:16": "9:16" },
+                     default: "1:1", description: "The aspect ratio of the generated image"
+                 },
+                 {
+                     name: "add_watermark", type: OptionType.boolean, default: false, description: "Add an invisible watermark to the generated image, useful for detection of AI images"
+                 },
+             ];
+
+             const enhanceOptions: ModelOptionInfoItem[] = !model.includes("generate-001") ? [
+                 {
+                     name: "enhance_prompt", type: OptionType.boolean, default: true, description: "VertexAI automatically rewrites the prompt to better reflect the prompt's intent."
+                 },
+             ] : [];
+
+             return {
+                 _option_id: "vertexai-imagen",
+                 options: [
+                     ...commonOptions,
+                     ...modeOptions,
+                     ...outputOptions,
+                     ...enhanceOptions,
+                 ]
+             };
+         }
+         if (model.includes("capability")) {
+             //Edit models
+             let guidanceScaleDefault = 75;
+             if ((option as ImagenOptions)?.edit_mode === ImagenTaskType.EDIT_MODE_INPAINT_INSERTION) {
+                 guidanceScaleDefault = 60;
+             }
+
+             const modeOptions: ModelOptionInfoItem[] = [
+                 {
+                     name: "edit_mode", type: OptionType.enum,
+                     enum: {
+                         "EDIT_MODE_INPAINT_REMOVAL": "EDIT_MODE_INPAINT_REMOVAL",
+                         "EDIT_MODE_INPAINT_INSERTION": "EDIT_MODE_INPAINT_INSERTION",
+                         "EDIT_MODE_BGSWAP": "EDIT_MODE_BGSWAP",
+                         "EDIT_MODE_OUTPAINT": "EDIT_MODE_OUTPAINT",
+                         "CUSTOMIZATION_SUBJECT": "CUSTOMIZATION_SUBJECT",
+                         "CUSTOMIZATION_STYLE": "CUSTOMIZATION_STYLE",
+                         "CUSTOMIZATION_CONTROLLED": "CUSTOMIZATION_CONTROLLED",
+                         "CUSTOMIZATION_INSTRUCT": "CUSTOMIZATION_INSTRUCT",
+                     },
+                     description: "The editing mode. CUSTOMIZATION options use few-shot learning to generate images based on a few examples."
+                 },
+                 {
+                     name: "guidance_scale", type: OptionType.numeric, min: 0, max: 500, default: guidanceScaleDefault,
+                     integer: true, description: "How closely the generation follows the prompt"
+                 }
+             ];
+
+             const maskOptions: ModelOptionInfoItem[] = ((option as ImagenOptions)?.edit_mode?.includes("EDIT")) ? [
+                 {
+                     name: "mask_mode", type: OptionType.enum,
+                     enum: {
+                         "MASK_MODE_USER_PROVIDED": "MASK_MODE_USER_PROVIDED",
+                         "MASK_MODE_BACKGROUND": "MASK_MODE_BACKGROUND",
+                         "MASK_MODE_FOREGROUND": "MASK_MODE_FOREGROUND",
+                         "MASK_MODE_SEMANTIC": "MASK_MODE_SEMANTIC",
+                     },
+                     default: "MASK_MODE_USER_PROVIDED",
+                     description: "How should the mask for the generation be provided"
+                 },
+                 {
+                     name: "mask_dilation", type: OptionType.numeric, min: 0, max: 1,
+                     integer: true, description: "The mask dilation, grows the mask by a percentage of image width to compensate for imprecise masks."
+                 },
+             ] : [];
+
+             const maskClassOptions: ModelOptionInfoItem[] = ((option as ImagenOptions)?.mask_mode === ImagenMaskMode.MASK_MODE_SEMANTIC) ? [
+                 {
+                     name: "mask_class", type: OptionType.string_list, default: [],
+                     description: "Input Class IDs. Create a mask based on image class, based on https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api-customization#segment-ids"
+                 }
+             ] : [];
+
+             const editOptions: ModelOptionInfoItem[] = (option as ImagenOptions)?.edit_mode?.includes("EDIT") ? [
+                 {
+                     name: "edit_steps", type: OptionType.numeric, default: 75,
+                     integer: true, description: "The number of steps for the base image generation, more steps means more time and better quality"
+                 },
+             ] : [];
+
+             const customizationOptions: ModelOptionInfoItem[] = (option as ImagenOptions)?.edit_mode === ImagenTaskType.CUSTOMIZATION_CONTROLLED
+                 || (option as ImagenOptions)?.edit_mode === ImagenTaskType.CUSTOMIZATION_SUBJECT ? [
+                 {
+                     name: "controlType", type: OptionType.enum, enum: { "Face Mesh": "CONTROL_TYPE_FACE_MESH", "Canny": "CONTROL_TYPE_CANNY", "Scribble": "CONTROL_TYPE_SCRIBBLE" },
+                     default: "CONTROL_TYPE_CANNY", description: "Method used to generate the control image"
+                 },
+                 {
+                     name: "controlImageComputation", type: OptionType.boolean, default: true, description: "Should the control image be computed from the input image, or is it provided"
+                 }
+             ] : [];
+
+             return {
+                 _option_id: "vertexai-imagen",
+                 options: [
+                     ...modeOptions,
+                     ...commonOptions,
+                     ...maskOptions,
+                     ...maskClassOptions,
+                     ...editOptions,
+                     ...customizationOptions,
+                     ...outputOptions,
+                 ]
+             };
+         }
+     }
+     else if (model.includes("gemini")) {
+         const max_tokens_limit = getGeminiMaxTokensLimit(model);
+         const excludeOptions = ["max_tokens", "presence_penalty"];
+         let commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
+         if (model.includes("1.5")) {
+             commonOptions = commonOptions.filter((option) => option.name !== "frequency_penalty");
+         }
+         const max_tokens: ModelOptionInfoItem[] = [{
+             name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
+             integer: true, step: 200, description: "The maximum number of tokens to generate"
+         }];
+         return {
+             _option_id: "vertexai-gemini",
+             options: [
+                 ...max_tokens,
+                 ...commonOptions,
+             ]
+         };
+     }
+     else if (model.includes("claude")) {
+         const max_tokens_limit = getClaudeMaxTokensLimit(model, option as VertexAIClaudeOptions);
+         const excludeOptions = ["max_tokens", "presence_penalty", "frequency_penalty"];
+         let commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
+         const max_tokens: ModelOptionInfoItem[] = [{
+             name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
+             integer: true, step: 200, description: "The maximum number of tokens to generate"
+         }];
+
+         if (model.includes("3-7")) {
+             const claudeModeOptions: ModelOptionInfoItem[] = [
+                 {
+                     name: "thinking_mode",
+                     type: OptionType.boolean,
+                     default: false,
+                     description: "If true, use the extended reasoning mode"
+                 },
+             ];
+             const claudeThinkingOptions: ModelOptionInfoItem[] = (option as VertexAIClaudeOptions)?.thinking_mode ? [
+                 {
+                     name: "thinking_budget_tokens",
+                     type: OptionType.numeric,
+                     min: 1024,
+                     default: 4000,
+                     integer: true,
+                     step: 100,
+                     description: "The target number of tokens to use for reasoning, not a hard limit."
+                 },
+             ] : [];
+
+             return {
+                 _option_id: "vertexai-claude",
+                 options: [
+                     ...max_tokens,
+                     ...commonOptions,
+                     ...claudeModeOptions,
+                     ...claudeThinkingOptions,
+                 ]
+             };
+         }
+         return {
+             _option_id: "vertexai-claude",
+             options: [
+                 ...max_tokens,
+                 ...commonOptions,
+             ]
+         };
+     }
+     return textOptionsFallback;
+ }
+ function getGeminiMaxTokensLimit(model: string): number {
+     if (model.includes("thinking")) {
+         return 65536;
+     }
+     if (model.includes("ultra") || model.includes("vision")) {
+         return 2048;
+     }
+     return 8192;
+ }
+ function getClaudeMaxTokensLimit(model: string, option?: VertexAIClaudeOptions): number {
+     if (model.includes("3-7")) {
+         if (option && option?.thinking_mode) {
+             return 128000;
+         } else {
+             return 8192;
+         }
+     }
+     else if (model.includes("3-5")) {
+         return 8192;
+     }
+     else {
+         return 4096;
+     }
+ }
+
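For illustration only (a sketch, not from the package or its docs): how the refresh pattern above might be exercised, assuming getVertexAiOptions, ImagenTaskType and ImagenOptions are re-exported from the package root and using a Vertex AI model name purely as an example. Once an edit_mode is chosen, a second call surfaces the options that depend on it:

import { getVertexAiOptions, ImagenTaskType, ImagenOptions } from "@llumiverse/core";

// First pass: no edit_mode selected yet, so the mask/edit options are absent.
const base = getVertexAiOptions("imagen-3.0-capability-001");
console.log(base.options.map((o) => o.name));

// Second pass: edit_mode includes "EDIT", so mask_mode, mask_dilation and
// edit_steps now appear. controlType/subjectType are required by the
// published interface even though they only matter for CUSTOMIZATION modes.
const withMode: ImagenOptions = {
    _option_id: "vertexai-imagen",
    edit_mode: ImagenTaskType.EDIT_MODE_INPAINT_REMOVAL,
    controlType: "CONTROL_TYPE_CANNY",
    subjectType: "SUBJECT_TYPE_DEFAULT",
};
console.log(getVertexAiOptions("imagen-3.0-capability-001", withMode).options.map((o) => o.name));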
package/src/options.ts ADDED
@@ -0,0 +1,62 @@
+ import { ModelOptions, ModelOptionsInfo, OptionType, SharedOptions } from "./types.js";
+ import { getBedrockOptions } from "./options/bedrock.js";
+ import { getVertexAiOptions } from "./options/vertexai.js";
+ import { getOpenAiOptions } from "./options/openai.js";
+ import { getGroqOptions } from "./options/groq.js";
+
+ export interface TextFallbackOptions {
+     _option_id: "text-fallback"; //For specific models this should be formatted as "provider-model"
+     max_tokens?: number;
+     temperature?: number;
+     top_p?: number;
+     top_k?: number;
+     presence_penalty?: number;
+     frequency_penalty?: number;
+     stop_sequence?: string[];
+ }
+
+ export const textOptionsFallback: ModelOptionsInfo = {
+     _option_id: "text-fallback",
+     options: [
+         {
+             name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1,
+             integer: true, step: 200, description: "The maximum number of tokens to generate"
+         },
+         {
+             name: SharedOptions.temperature, type: OptionType.numeric, min: 0.0, default: 0.7,
+             integer: false, step: 0.1, description: "A higher temperature biases toward less likely tokens, making the model more creative"
+         },
+         {
+             name: SharedOptions.top_p, type: OptionType.numeric, min: 0, max: 1,
+             integer: false, step: 0.1, description: "Limits token sampling to the cumulative probability of the top p tokens"
+         },
+         {
+             name: SharedOptions.top_k, type: OptionType.numeric, min: 1,
+             integer: true, step: 1, description: "Limits token sampling to the top k tokens"
+         },
+         {
+             name: SharedOptions.presence_penalty, type: OptionType.numeric, min: -2.0, max: 2.0,
+             integer: false, step: 0.1, description: "Penalise tokens if they appear at least once in the text"
+         },
+         {
+             name: SharedOptions.frequency_penalty, type: OptionType.numeric, min: -2.0, max: 2.0,
+             integer: false, step: 0.1, description: "Penalise tokens based on their frequency in the text"
+         },
+         { name: SharedOptions.stop_sequence, type: OptionType.string_list, value: [], description: "The generation will halt if one of the stop sequences is output" },
+     ]
+ };
+
+ export function getOptions(provider?: string, model?: string, options?: ModelOptions): ModelOptionsInfo {
+     switch (provider) {
+         case "bedrock":
+             return getBedrockOptions(model ?? "", options);
+         case "vertexai":
+             return getVertexAiOptions(model ?? "", options);
+         case "openai":
+             return getOpenAiOptions(model ?? "", options);
+         case "groq":
+             return getGroqOptions(model ?? "", options);
+         default:
+             return textOptionsFallback;
+     }
+ }
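A hedged usage sketch of the new dispatcher (the root re-export and the provider/model strings are assumptions for illustration, not confirmed by this diff). Unknown providers fall back to the generic text option set:

import { getOptions } from "@llumiverse/core";

// For Gemini the fallback list is filtered (max_tokens is replaced with a
// model-specific limit, presence_penalty is removed, and 1.5 models also
// drop frequency_penalty).
const info = getOptions("vertexai", "gemini-1.5-pro");
for (const opt of info.options) {
    // Each item is one of the ModelOptionInfoItem variants declared in types.ts.
    console.log(opt.name, opt.type, opt.default);
}

// Any unrecognized provider returns textOptionsFallback.
console.log(getOptions("some-other-provider")._option_id); // "text-fallback"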
package/src/types.ts CHANGED
@@ -1,21 +1,26 @@
- import { JSONSchema4 } from "json-schema";
- import { PromptFormatter } from "./formatters/index.js";
- import { JSONObject } from "./json.js";
+ import { JSONSchema4 } from 'json-schema';
+
+ import { PromptFormatter } from './formatters/index.js';
+ import { JSONObject } from './json.js';
+ import { TextFallbackOptions } from './options.js';
+ import { VertexAIOptions } from './options/vertexai.js';
+ import { BedrockOptions } from './options/bedrock.js';
+ import { OpenAiOptions } from './options/openai.js';
  
  export interface EmbeddingsOptions {
      /**
-      * The content to generate the embeddings for. Required.
+      * The text to generate the embeddings for. One of text or image is required.
       */
-     content: string;
+     text?: string;
      /**
-      * The model to use to generate the embeddings. Optional.
+      * The image to generate embeddings for
       */
-     model?: string;
+     image?: string
      /**
-      * Additional options for the embeddings generation. Optional.
-      * The supported properties depends on the target implementation.
+      * The model to use to generate the embeddings. Optional.
       */
-     [key: string]: any;
+     model?: string;
+
  }
  
  export interface EmbeddingsResult {
@@ -26,32 +31,60 @@ export interface EmbeddingsResult {
      /**
       * The model used to generate the embeddings.
       */
-     model?: string;
+     model: string;
      /**
       * Number of tokens of the input text.
       */
      token_count?: number;
-     /**
-      * Additional properties. Depends on the target implementation.
-      */
-     [key: string]: any;
+
  }
  
  export interface ResultValidationError {
-     code: 'validation_error' | 'json_error';
+     code: 'validation_error' | 'json_error' | 'content_policy_violation';
      message: string;
      data?: string;
  }
  
+ //ResultT should be either JSONObject or string
+ //Internal structure used in driver implementation.
+ export interface CompletionChunkObject<ResultT = any> {
+     result: ResultT;
+     token_usage?: ExecutionTokenUsage;
+     finish_reason?: "stop" | "length" | string;
+ }
+
+ //Internal structure used in driver implementation.
+ export type CompletionChunk = CompletionChunkObject | string;
+
+ export interface ToolDefinition {
+     name: string,
+     description?: string,
+     input_schema: {
+         type: 'object';
+         properties?: unknown | null | undefined;
+         [k: string]: unknown;
+     },
+ }
+
+ export interface ToolUse {
+     id: string,
+     name: string,
+     input: JSONObject | null
+ }
+
+ //ResultT should be either JSONObject or string
  export interface Completion<ResultT = any> {
      // the driver impl must return the result and optionally the token_usage. the execution time is computed by the extended abstract driver
      result: ResultT;
      token_usage?: ExecutionTokenUsage;
-
+     /**
+      * Contains the tools from which the model awaits information.
+      */
+     tool_use?: ToolUse[];
      /**
       * The finish reason as reported by the model: stop | length or other model specific values
       */
-     finish_reason?: "stop" | "length" | string;
+     finish_reason?: "stop" | "length" | "tool_use" | string;
  
      /**
       * Set only if a result validation error occurred, otherwise if the result is valid the error field is undefined
@@ -64,6 +97,16 @@ export interface Completion<ResultT = any> {
       */
      original_response?: Record<string, any>;
  
+     /**
+      * The conversation context. This is an opaque structure that can be passed to the next request to restore the context.
+      */
+     conversation?: unknown;
+ }
+
+ export interface ImageGeneration {
+
+     images?: string[];
+
  }
  
  export interface ExecutionResponse<PromptT = any> extends Completion {
@@ -72,6 +115,10 @@ export interface ExecutionResponse<PromptT = any> extends Completion {
       * The time it took to execute the request in seconds
       */
      execution_time?: number;
+     /**
+      * The number of chunks for streamed executions
+      */
+     chunks?: number;
  }
  
@@ -90,6 +137,9 @@ export interface DriverOptions {
      logger?: Logger | "console";
  }
  
+ //Options are split into PromptOptions, ModelOptions and ExecutionOptions.
+ //ExecutionOptions are most often used within llumiverse as they are the most complete.
+ //The base types are useful for external code that needs to interact with llumiverse.
  export interface PromptOptions {
      model: string;
      /**
@@ -99,50 +149,100 @@ export interface PromptOptions {
      format?: PromptFormatter;
      result_schema?: JSONSchema4;
  }
- export interface ExecutionOptions extends PromptOptions {
-     temperature?: number;
-     max_tokens?: number;
-     stop_sequence?: string | string[];
  
+ export interface ExecutionOptions extends PromptOptions {
      /**
-      * restricts the selection of tokens to the “k” most likely options, based on their probabilities
-      * Lower values make the model more deterministic, more focused. Examples:
-      * - 10 - result will be highly controlled anc contextually relevant
-      * - 50 - result will be more creative but maintaining a balance between control and creativity
-      * - 100 - will lead to more creative and less predictable outputs
-      * It will be ignored on OpenAI since it does not support it
+      * If set to true the original response from the target LLM will be included in the response under the original_response field.
+      * This is useful for debugging and for some advanced use cases.
+      * It is ignored on streaming requests
       */
-     top_k?: number;
-
+     include_original_response?: boolean;
+     model_options?: ModelOptions;
+     output_modality: Modalities;
      /**
-      * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
-      * Either use temperature or top_p, not both
+      * Available tools for the request
       */
-     top_p?: number;
-
+     tools?: ToolDefinition[];
      /**
-      * Only supported for OpenAI. Look at OpenAI documentation for more detailsx
+      * This is an opaque structure that provides a conversation context
+      * Each driver implementation will return a conversation property in the execution response
+      * that can be passed here to restore the context when a new prompt is sent to the model.
       */
-     top_logprobs?: number;
+     conversation?: unknown | null;
+ }
  
-     /**
-      * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
-      * Ignored for models which doesn;t support it
-      */
-     presence_penalty?: number;
+ //Common names to share between different models
+ export enum SharedOptions {
+     //Text
+     max_tokens = "max_tokens",
+     temperature = "temperature",
+     top_p = "top_p",
+     top_k = "top_k",
+     presence_penalty = "presence_penalty",
+     frequency_penalty = "frequency_penalty",
+     stop_sequence = "stop_sequence",
+
+     //Image
+     seed = "seed",
+     number_of_images = "number_of_images",
+ }
  
-     /**
-      * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
-      * Ignored for models which doesn;t support it
-      */
-     frequency_penalty?: number;
+ export enum OptionType {
+     numeric = "numeric",
+     enum = "enum",
+     boolean = "boolean",
+     string_list = "string_list"
+ }
  
-     /**
-      * If set to true the original response from the target LLM will be included in the response under the original_response field.
-      * This is useful for debugging and for some advanced use cases.
-      * It is ignored on streaming requests
-      */
-     include_original_response?: boolean;
+ // ============== Model Options ===============
+
+ export type ModelOptions = TextFallbackOptions | VertexAIOptions | BedrockOptions | OpenAiOptions;
+
+ // ============== Option Info ===============
+
+ export interface ModelOptionsInfo {
+     options: ModelOptionInfoItem[];
+     _option_id: string; //Should follow same ids as ModelOptions
+ }
+
+ export type ModelOptionInfoItem = NumericOptionInfo | EnumOptionInfo | BooleanOptionInfo | StringListOptionInfo;
+ interface OptionInfoPrototype {
+     type: OptionType;
+     name: string;
+     description?: string;
+
+     //If this is true, whether other options apply is dependent on this option
+     //Therefore, if this option is changed, the set of available options should be refreshed.
+     refresh?: boolean;
+ }
+
+ export interface NumericOptionInfo extends OptionInfoPrototype {
+     type: OptionType.numeric;
+     value?: number;
+     min?: number;
+     max?: number;
+     step?: number;
+     integer?: boolean;
+     default?: number;
+ }
+
+ export interface EnumOptionInfo extends OptionInfoPrototype {
+     type: OptionType.enum;
+     value?: string;
+     enum: Record<string, string>;
+     default?: string;
+ }
+
+ export interface BooleanOptionInfo extends OptionInfoPrototype {
+     type: OptionType.boolean;
+     value?: boolean;
+     default?: boolean;
+ }
+
+ export interface StringListOptionInfo extends OptionInfoPrototype {
+     type: OptionType.string_list;
+     value?: string[];
+     default?: string[];
  }
  
  // ============== Prompts ===============
@@ -151,11 +251,21 @@ export enum PromptRole {
      system = "system",
      user = "user",
      assistant = "assistant",
+     negative = "negative",
+     mask = "mask",
+     /**
+      * Used to send the response of a tool
+      */
+     tool = "tool"
  }
  
  export interface PromptSegment {
      role: PromptRole;
      content: string;
+     /**
+      * The tool use id if the segment is a tool response
+      */
+     tool_use_id?: string;
      files?: DataSource[]
  }
  
@@ -165,6 +275,11 @@ export interface ExecutionTokenUsage {
      total?: number;
  }
  
+ export enum Modalities {
+     text = "text",
+     image = "image"
+ }
+
  
  // ============== AI MODEL ==============
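To close, a minimal sketch of how the reshaped ExecutionOptions fits together: the sampling fields that were top-level in 0.15.0 now live in the discriminated model_options object, and output_modality is required. The model name and the root re-exports are assumptions for illustration:

import { ExecutionOptions, Modalities } from "@llumiverse/core";

const options: ExecutionOptions = {
    model: "gemini-1.5-pro",
    output_modality: Modalities.text,
    // In 0.15.0 temperature/max_tokens/stop_sequence sat directly on
    // ExecutionOptions; in 0.16.0 they move into model_options, keyed
    // by _option_id so drivers can narrow the union.
    model_options: {
        _option_id: "text-fallback",
        temperature: 0.7,
        max_tokens: 1024,
        stop_sequence: ["\n\n"],
    },
};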