@llumiverse/core 0.15.0 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. package/README.md +3 -3
  2. package/lib/cjs/CompletionStream.js +31 -10
  3. package/lib/cjs/CompletionStream.js.map +1 -1
  4. package/lib/cjs/Driver.js +20 -22
  5. package/lib/cjs/Driver.js.map +1 -1
  6. package/lib/cjs/async.js +3 -2
  7. package/lib/cjs/async.js.map +1 -1
  8. package/lib/cjs/formatters/commons.js.map +1 -1
  9. package/lib/cjs/formatters/generic.js.map +1 -1
  10. package/lib/cjs/formatters/index.js +1 -3
  11. package/lib/cjs/formatters/index.js.map +1 -1
  12. package/lib/cjs/formatters/{claude.js → nova.js} +33 -37
  13. package/lib/cjs/formatters/nova.js.map +1 -0
  14. package/lib/cjs/formatters/openai.js +36 -6
  15. package/lib/cjs/formatters/openai.js.map +1 -1
  16. package/lib/cjs/index.js +1 -0
  17. package/lib/cjs/index.js.map +1 -1
  18. package/lib/cjs/options/bedrock.js +343 -0
  19. package/lib/cjs/options/bedrock.js.map +1 -0
  20. package/lib/cjs/options/groq.js +37 -0
  21. package/lib/cjs/options/groq.js.map +1 -0
  22. package/lib/cjs/options/openai.js +123 -0
  23. package/lib/cjs/options/openai.js.map +1 -0
  24. package/lib/cjs/options/vertexai.js +257 -0
  25. package/lib/cjs/options/vertexai.js.map +1 -0
  26. package/lib/cjs/options.js +54 -0
  27. package/lib/cjs/options.js.map +1 -0
  28. package/lib/cjs/types.js +34 -1
  29. package/lib/cjs/types.js.map +1 -1
  30. package/lib/esm/CompletionStream.js +31 -10
  31. package/lib/esm/CompletionStream.js.map +1 -1
  32. package/lib/esm/Driver.js +21 -23
  33. package/lib/esm/Driver.js.map +1 -1
  34. package/lib/esm/async.js +3 -2
  35. package/lib/esm/async.js.map +1 -1
  36. package/lib/esm/formatters/commons.js.map +1 -1
  37. package/lib/esm/formatters/generic.js.map +1 -1
  38. package/lib/esm/formatters/index.js +1 -3
  39. package/lib/esm/formatters/index.js.map +1 -1
  40. package/lib/esm/formatters/{claude.js → nova.js} +32 -36
  41. package/lib/esm/formatters/nova.js.map +1 -0
  42. package/lib/esm/formatters/openai.js +36 -6
  43. package/lib/esm/formatters/openai.js.map +1 -1
  44. package/lib/esm/index.js +1 -0
  45. package/lib/esm/index.js.map +1 -1
  46. package/lib/esm/options/bedrock.js +340 -0
  47. package/lib/esm/options/bedrock.js.map +1 -0
  48. package/lib/esm/options/groq.js +34 -0
  49. package/lib/esm/options/groq.js.map +1 -0
  50. package/lib/esm/options/openai.js +120 -0
  51. package/lib/esm/options/openai.js.map +1 -0
  52. package/lib/esm/options/vertexai.js +253 -0
  53. package/lib/esm/options/vertexai.js.map +1 -0
  54. package/lib/esm/options.js +50 -0
  55. package/lib/esm/options.js.map +1 -0
  56. package/lib/esm/types.js +33 -0
  57. package/lib/esm/types.js.map +1 -1
  58. package/lib/types/CompletionStream.d.ts +1 -1
  59. package/lib/types/CompletionStream.d.ts.map +1 -1
  60. package/lib/types/Driver.d.ts +5 -4
  61. package/lib/types/Driver.d.ts.map +1 -1
  62. package/lib/types/async.d.ts +3 -2
  63. package/lib/types/async.d.ts.map +1 -1
  64. package/lib/types/formatters/commons.d.ts +2 -2
  65. package/lib/types/formatters/commons.d.ts.map +1 -1
  66. package/lib/types/formatters/generic.d.ts +3 -3
  67. package/lib/types/formatters/generic.d.ts.map +1 -1
  68. package/lib/types/formatters/index.d.ts +3 -5
  69. package/lib/types/formatters/index.d.ts.map +1 -1
  70. package/lib/types/formatters/nova.d.ts +40 -0
  71. package/lib/types/formatters/nova.d.ts.map +1 -0
  72. package/lib/types/formatters/openai.d.ts +13 -1
  73. package/lib/types/formatters/openai.d.ts.map +1 -1
  74. package/lib/types/index.d.ts +1 -0
  75. package/lib/types/index.d.ts.map +1 -1
  76. package/lib/types/options/bedrock.d.ts +32 -0
  77. package/lib/types/options/bedrock.d.ts.map +1 -0
  78. package/lib/types/options/groq.d.ts +12 -0
  79. package/lib/types/options/groq.d.ts.map +1 -0
  80. package/lib/types/options/openai.d.ts +21 -0
  81. package/lib/types/options/openai.d.ts.map +1 -0
  82. package/lib/types/options/vertexai.d.ts +52 -0
  83. package/lib/types/options/vertexai.d.ts.map +1 -0
  84. package/lib/types/options.d.ts +14 -0
  85. package/lib/types/options.d.ts.map +1 -0
  86. package/lib/types/types.d.ts +151 -52
  87. package/lib/types/types.d.ts.map +1 -1
  88. package/package.json +6 -8
  89. package/src/CompletionStream.ts +31 -11
  90. package/src/Driver.ts +30 -26
  91. package/src/async.ts +7 -5
  92. package/src/formatters/commons.ts +2 -2
  93. package/src/formatters/generic.ts +2 -2
  94. package/src/formatters/index.ts +3 -6
  95. package/src/formatters/nova.ts +141 -0
  96. package/src/formatters/openai.ts +52 -12
  97. package/src/index.ts +2 -1
  98. package/src/options/bedrock.ts +388 -0
  99. package/src/options/groq.ts +47 -0
  100. package/src/options/openai.ts +148 -0
  101. package/src/options/vertexai.ts +312 -0
  102. package/src/options.ts +62 -0
  103. package/src/types.ts +192 -53
  104. package/lib/cjs/formatters/claude.js.map +0 -1
  105. package/lib/cjs/formatters/llama2.js +0 -48
  106. package/lib/cjs/formatters/llama2.js.map +0 -1
  107. package/lib/cjs/formatters/llama3.js +0 -42
  108. package/lib/cjs/formatters/llama3.js.map +0 -1
  109. package/lib/esm/formatters/claude.js.map +0 -1
  110. package/lib/esm/formatters/llama2.js +0 -45
  111. package/lib/esm/formatters/llama2.js.map +0 -1
  112. package/lib/esm/formatters/llama3.js +0 -39
  113. package/lib/esm/formatters/llama3.js.map +0 -1
  114. package/lib/types/formatters/claude.d.ts +0 -25
  115. package/lib/types/formatters/claude.d.ts.map +0 -1
  116. package/lib/types/formatters/llama2.d.ts +0 -4
  117. package/lib/types/formatters/llama2.d.ts.map +0 -1
  118. package/lib/types/formatters/llama3.d.ts +0 -7
  119. package/lib/types/formatters/llama3.d.ts.map +0 -1
  120. package/src/formatters/claude.ts +0 -131
  121. package/src/formatters/llama2.ts +0 -58
  122. package/src/formatters/llama3.ts +0 -55
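The headline changes in this release are the removal of the claude, llama2 and llama3 prompt formatters in favour of a single nova formatter, and a new options module (package/src/options.ts plus per-provider files for bedrock, vertexai, openai and groq) that describes which generation options a given provider/model combination supports. The source diffs for src/options/vertexai.ts and src/options.ts are reproduced below. As a quick orientation, here is a minimal sketch of how the new getOptions() entry point might be consumed; it assumes getOptions and the related types are re-exported from the package root, which this diff does not show directly.

    import { getOptions, ModelOptionsInfo } from "@llumiverse/core";

    // Ask which options are available for a provider/model pair.
    // Unknown providers fall back to the generic text option set (textOptionsFallback).
    const info: ModelOptionsInfo = getOptions("vertexai", "gemini-1.5-pro");

    for (const item of info.options) {
        console.log(item.name, item.type, item.description);
    }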
package/src/options/vertexai.ts ADDED
@@ -0,0 +1,312 @@
+ import { ModelOptionsInfo, ModelOptionInfoItem, OptionType, SharedOptions, ModelOptions } from "../types.js";
+ import { textOptionsFallback } from "../options.js";
+
+ // Union type of all VertexAI options
+ export type VertexAIOptions = ImagenOptions | VertexAIClaudeOptions;
+
+ export enum ImagenTaskType {
+     TEXT_IMAGE = "TEXT_IMAGE",
+     EDIT_MODE_INPAINT_REMOVAL = "EDIT_MODE_INPAINT_REMOVAL",
+     EDIT_MODE_INPAINT_INSERTION = "EDIT_MODE_INPAINT_INSERTION",
+     EDIT_MODE_BGSWAP = "EDIT_MODE_BGSWAP",
+     EDIT_MODE_OUTPAINT = "EDIT_MODE_OUTPAINT",
+     CUSTOMIZATION_SUBJECT = "CUSTOMIZATION_SUBJECT",
+     CUSTOMIZATION_STYLE = "CUSTOMIZATION_STYLE",
+     CUSTOMIZATION_CONTROLLED = "CUSTOMIZATION_CONTROLLED",
+     CUSTOMIZATION_INSTRUCT = "CUSTOMIZATION_INSTRUCT",
+ }
+
+ export enum ImagenMaskMode {
+     MASK_MODE_USER_PROVIDED = "MASK_MODE_USER_PROVIDED",
+     MASK_MODE_BACKGROUND = "MASK_MODE_BACKGROUND",
+     MASK_MODE_FOREGROUND = "MASK_MODE_FOREGROUND",
+     MASK_MODE_SEMANTIC = "MASK_MODE_SEMANTIC",
+ }
+
+ export interface ImagenOptions {
+     _option_id: "vertexai-imagen"
+
+     //General and generate options
+     number_of_images?: number;
+     seed?: number;
+     person_generation?: "dont_allow" | "allow_adult" | "allow_all";
+     safety_setting?: "block_none" | "block_only_high" | "block_medium_and_above" | "block_low_and_above"; //The "off" option does not seem to work for Imagen 3, might be only for text models
+     image_file_type?: "image/jpeg" | "image/png";
+     jpeg_compression_quality?: number;
+     aspect_ratio?: "1:1" | "4:3" | "3:4" | "16:9" | "9:16";
+     add_watermark?: boolean;
+     enhance_prompt?: boolean;
+
+     //Capability options
+     edit_mode?: ImagenTaskType;
+     guidance_scale?: number;
+     edit_steps?: number;
+     mask_mode?: ImagenMaskMode;
+     mask_dilation?: number;
+     mask_class?: number[];
+
+     //Customization options
+     controlType: "CONTROL_TYPE_FACE_MESH" | "CONTROL_TYPE_CANNY" | "CONTROL_TYPE_SCRIBBLE";
+     controlImageComputation?: boolean;
+     subjectType: "SUBJECT_TYPE_PERSON" | "SUBJECT_TYPE_ANIMAL" | "SUBJECT_TYPE_PRODUCT" | "SUBJECT_TYPE_DEFAULT";
+ }
+
+ export interface VertexAIClaudeOptions {
+     _option_id: "vertexai-claude"
+     max_tokens?: number;
+     temperature?: number;
+     top_p?: number;
+     top_k?: number;
+     stop_sequence?: string[];
+     thinking_mode?: boolean;
+     thinking_budget_tokens?: number;
+ }
+
+ export function getVertexAiOptions(model: string, option?: ModelOptions): ModelOptionsInfo {
+     if (model.includes("imagen-3.0")) {
+         const commonOptions: ModelOptionInfoItem[] = [
+             {
+                 name: SharedOptions.number_of_images, type: OptionType.numeric, min: 1, max: 4, default: 1,
+                 integer: true, description: "Number of Images to generate",
+             },
+             {
+                 name: SharedOptions.seed, type: OptionType.numeric, min: 0, max: 4294967295, default: 12,
+                 integer: true, description: "The seed of the generated image"
+             },
+             {
+                 name: "person_generation", type: OptionType.enum, enum: { "Disallow the inclusion of people or faces in images": "dont_allow", "Allow generation of adults only": "allow_adult", "Allow generation of people of all ages": "allow_all" },
+                 default: "allow_adult", description: "The safety setting for allowing the generation of people in the image"
+             },
+             {
+                 name: "safety_setting", type: OptionType.enum, enum: { "Block very few problematic prompts and responses": "block_none", "Block only few problematic prompts and responses": "block_only_high", "Block some problematic prompts and responses": "block_medium_and_above", "Strictest filtering": "block_low_and_above" },
+                 default: "block_medium_and_above", description: "The overall safety setting"
+             },
+         ];
+
+
+         const outputOptions: ModelOptionInfoItem[] = [
+             {
+                 name: "image_file_type", type: OptionType.enum, enum: { "JPEG": "image/jpeg", "PNG": "image/png" },
+                 default: "image/png", description: "The file type of the generated image",
+                 refresh: true,
+             },
+         ]
+
+         const jpegQuality: ModelOptionInfoItem = {
+             name: "jpeg_compression_quality", type: OptionType.numeric, min: 0, max: 100, default: 75,
+             integer: true, description: "The compression quality of the JPEG image",
+         }
+
+         if ((option as ImagenOptions)?.image_file_type === "image/jpeg") {
+             outputOptions.push(jpegQuality);
+         }
+         if (model.includes("generate")) {
+             //Generate models
+             const modeOptions: ModelOptionInfoItem[]
+                 = [
+                     {
+                         name: "aspect_ratio", type: OptionType.enum, enum: { "1:1": "1:1", "4:3": "4:3", "3:4": "3:4", "16:9": "16:9", "9:16": "9:16" },
+                         default: "1:1", description: "The aspect ratio of the generated image"
+                     },
+                     {
+                         name: "add_watermark", type: OptionType.boolean, default: false, description: "Add an invisible watermark to the generated image, useful for detection of AI images"
+                     },
+
+                 ];
+
+             const enhanceOptions: ModelOptionInfoItem[] = !model.includes("generate-001") ? [
+                 {
+                     name: "enhance_prompt", type: OptionType.boolean, default: true, description: "VertexAI automatically rewrites the prompt to better reflect the prompt's intent."
+                 },
+             ] : [];
+
+             return {
+                 _option_id: "vertexai-imagen",
+                 options: [
+                     ...commonOptions,
+                     ...modeOptions,
+                     ...outputOptions,
+                     ...enhanceOptions,
+                 ]
+             };
+         }
+         if (model.includes("capability")) {
+             //Edit models
+             let guidanceScaleDefault = 75;
+             if ((option as ImagenOptions)?.edit_mode === ImagenTaskType.EDIT_MODE_INPAINT_INSERTION) {
+                 guidanceScaleDefault = 60;
+             }
+
+             const modeOptions: ModelOptionInfoItem[] = [
+                 {
+                     name: "edit_mode", type: OptionType.enum,
+                     enum: {
+                         "EDIT_MODE_INPAINT_REMOVAL": "EDIT_MODE_INPAINT_REMOVAL",
+                         "EDIT_MODE_INPAINT_INSERTION": "EDIT_MODE_INPAINT_INSERTION",
+                         "EDIT_MODE_BGSWAP": "EDIT_MODE_BGSWAP",
+                         "EDIT_MODE_OUTPAINT": "EDIT_MODE_OUTPAINT",
+                         "CUSTOMIZATION_SUBJECT": "CUSTOMIZATION_SUBJECT",
+                         "CUSTOMIZATION_STYLE": "CUSTOMIZATION_STYLE",
+                         "CUSTOMIZATION_CONTROLLED": "CUSTOMIZATION_CONTROLLED",
+                         "CUSTOMIZATION_INSTRUCT": "CUSTOMIZATION_INSTRUCT",
+                     },
+                     description: "The editing mode. CUSTOMIZATION options use few-shot learning to generate images based on a few examples."
+                 },
+
+                 {
+                     name: "guidance_scale", type: OptionType.numeric, min: 0, max: 500, default: guidanceScaleDefault,
+                     integer: true, description: "How closely the generation follows the prompt"
+                 }
+             ];
+
+             const maskOptions: ModelOptionInfoItem[] = ((option as ImagenOptions)?.edit_mode?.includes("EDIT")) ? [
+                 {
+                     name: "mask_mode", type: OptionType.enum,
+                     enum: {
+                         "MASK_MODE_USER_PROVIDED": "MASK_MODE_USER_PROVIDED",
+                         "MASK_MODE_BACKGROUND": "MASK_MODE_BACKGROUND",
+                         "MASK_MODE_FOREGROUND": "MASK_MODE_FOREGROUND",
+                         "MASK_MODE_SEMANTIC": "MASK_MODE_SEMANTIC",
+                     },
+                     default: "MASK_MODE_USER_PROVIDED",
+                     description: "How should the mask for the generation be provided"
+                 },
+                 {
+                     name: "mask_dilation", type: OptionType.numeric, min: 0, max: 1,
+                     integer: true, description: "The mask dilation, grows the mask by a percentage of image width to compensate for imprecise masks."
+                 },
+             ] : [];
+
+             const maskClassOptions: ModelOptionInfoItem[] = ((option as ImagenOptions)?.mask_mode === ImagenMaskMode.MASK_MODE_SEMANTIC) ? [
+                 {
+                     name: "mask_class", type: OptionType.string_list, default: [],
+                     description: "Input Class IDs. Create a mask based on image class, based on https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/imagen-api-customization#segment-ids"
+                 }
+             ] : [];
+
+             const editOptions: ModelOptionInfoItem[] = (option as ImagenOptions)?.edit_mode?.includes("EDIT") ? [
+                 {
+                     name: "edit_steps", type: OptionType.numeric, default: 75,
+                     integer: true, description: "The number of steps for the base image generation, more steps means more time and better quality"
+                 },
+             ] : [];
+
+             const customizationOptions: ModelOptionInfoItem[] = (option as ImagenOptions)?.edit_mode === ImagenTaskType.CUSTOMIZATION_CONTROLLED
+                 || (option as ImagenOptions)?.edit_mode === ImagenTaskType.CUSTOMIZATION_SUBJECT ? [
+                 {
+                     name: "controlType", type: OptionType.enum, enum: { "Face Mesh": "CONTROL_TYPE_FACE_MESH", "Canny": "CONTROL_TYPE_CANNY", "Scribble": "CONTROL_TYPE_SCRIBBLE" },
+                     default: "CONTROL_TYPE_CANNY", description: "Method used to generate the control image"
+                 },
+                 {
+                     name: "controlImageComputation", type: OptionType.boolean, default: true, description: "Should the control image be computed from the input image, or is it provided"
+                 }
+             ] : [];
+
+             return {
+                 _option_id: "vertexai-imagen",
+                 options: [
+                     ...modeOptions,
+                     ...commonOptions,
+                     ...maskOptions,
+                     ...maskClassOptions,
+                     ...editOptions,
+                     ...customizationOptions,
+                     ...outputOptions,
+                 ]
+             };
+         }
+     }
+     else if (model.includes("gemini")) {
+         const max_tokens_limit = getGeminiMaxTokensLimit(model);
+         const excludeOptions = ["max_tokens", "presence_penalty"];
+         let commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
+         if (model.includes("1.5")) {
+             commonOptions = commonOptions.filter((option) => option.name !== "frequency_penalty");
+         }
+         const max_tokens: ModelOptionInfoItem[] = [{
+             name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
+             integer: true, step: 200, description: "The maximum number of tokens to generate"
+         }];
+         return {
+             _option_id: "vertexai-gemini",
+             options: [
+                 ...max_tokens,
+                 ...commonOptions,
+             ]
+         };
+     }
+     else if (model.includes("claude")) {
+         const max_tokens_limit = getClaudeMaxTokensLimit(model, option as VertexAIClaudeOptions);
+         const excludeOptions = ["max_tokens", "presence_penalty", "frequency_penalty"];
+         let commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
+         const max_tokens: ModelOptionInfoItem[] = [{
+             name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
+             integer: true, step: 200, description: "The maximum number of tokens to generate"
+         }];
+
+         if (model.includes("3-7")) {
+             const claudeModeOptions: ModelOptionInfoItem[] = [
+                 {
+                     name: "thinking_mode",
+                     type: OptionType.boolean,
+                     default: false,
+                     description: "If true, use the extended reasoning mode"
+                 },
+             ];
+             const claudeThinkingOptions: ModelOptionInfoItem[] = (option as VertexAIClaudeOptions)?.thinking_mode ? [
+                 {
+                     name: "thinking_budget_tokens",
+                     type: OptionType.numeric,
+                     min: 1024,
+                     default: 4000,
+                     integer: true,
+                     step: 100,
+                     description: "The target number of tokens to use for reasoning, not a hard limit."
+                 },
+             ] : [];
+
+             return {
+                 _option_id: "vertexai-claude",
+                 options: [
+                     ...max_tokens,
+                     ...commonOptions,
+                     ...claudeModeOptions,
+                     ...claudeThinkingOptions,
+                 ]
+             };
+         }
+         return {
+             _option_id: "vertexai-claude",
+             options: [
+                 ...max_tokens,
+                 ...commonOptions,
+             ]
+         };
+     }
+     return textOptionsFallback;
+ }
+ function getGeminiMaxTokensLimit(model: string): number {
+     if (model.includes("thinking")) {
+         return 65536;
+     }
+     if (model.includes("ultra") || model.includes("vision")) {
+         return 2048;
+     }
+     return 8192;
+ }
+ function getClaudeMaxTokensLimit(model: string, option?: VertexAIClaudeOptions): number {
+     if (model.includes("3-7")) {
+         if (option && option?.thinking_mode) {
+             return 128000;
+         } else {
+             return 8192;
+         }
+     }
+     else if (model.includes("3-5")) {
+         return 8192;
+     }
+     else {
+         return 4096;
+     }
+ }
+
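Worth noting about getVertexAiOptions() above: the option list it returns depends on the option values already selected. Mask and edit-step items only appear once an EDIT_* mode is chosen, the JPEG quality slider only appears when image_file_type is "image/jpeg", and the Claude thinking budget only appears when thinking_mode is enabled, so callers are expected to query the list again after a value changes (the refresh: true flag on image_file_type points the same way). A minimal sketch of that flow, assuming the package root re-exports these symbols and that ModelOptions accepts the ImagenOptions shape, as the casts above suggest:

    import { getVertexAiOptions, ImagenOptions, ImagenTaskType } from "@llumiverse/core";

    // First query: no edit mode selected yet, so only mode, common and output items come back.
    const before = getVertexAiOptions("imagen-3.0-capability-001");

    // Second query after the user picks an EDIT_* mode: mask and edit-step items are now included.
    const selected = {
        _option_id: "vertexai-imagen",
        edit_mode: ImagenTaskType.EDIT_MODE_INPAINT_REMOVAL,
    } as ImagenOptions;
    const after = getVertexAiOptions("imagen-3.0-capability-001", selected);

    console.log(before.options.map((o) => o.name));
    console.log(after.options.map((o) => o.name)); // now also mask_mode, mask_dilation and edit_steps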
package/src/options.ts ADDED
@@ -0,0 +1,62 @@
+ import { ModelOptions, ModelOptionsInfo, OptionType, SharedOptions } from "./types.js";
+ import { getBedrockOptions } from "./options/bedrock.js";
+ import { getVertexAiOptions } from "./options/vertexai.js";
+ import { getOpenAiOptions } from "./options/openai.js";
+ import { getGroqOptions } from "./options/groq.js";
+
+ export interface TextFallbackOptions {
+     _option_id: "text-fallback"; //For specific models this should be formatted as "provider-model"
+     max_tokens?: number;
+     temperature?: number;
+     top_p?: number;
+     top_k?: number;
+     presence_penalty?: number;
+     frequency_penalty?: number;
+     stop_sequence?: string[];
+ }
+
+ export const textOptionsFallback: ModelOptionsInfo = {
+     _option_id: "text-fallback",
+     options: [
+         {
+             name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1,
+             integer: true, step: 200, description: "The maximum number of tokens to generate"
+         },
+         {
+             name: SharedOptions.temperature, type: OptionType.numeric, min: 0.0, default: 0.7,
+             integer: false, step: 0.1, description: "A higher temperature biases toward less likely tokens, making the model more creative"
+         },
+         {
+             name: SharedOptions.top_p, type: OptionType.numeric, min: 0, max: 1,
+             integer: false, step: 0.1, description: "Limits token sampling to the cumulative probability of the top p tokens"
+         },
+         {
+             name: SharedOptions.top_k, type: OptionType.numeric, min: 1,
+             integer: true, step: 1, description: "Limits token sampling to the top k tokens"
+         },
+         {
+             name: SharedOptions.presence_penalty, type: OptionType.numeric, min: -2.0, max: 2.0,
+             integer: false, step: 0.1, description: "Penalise tokens if they appear at least once in the text"
+         },
+         {
+             name: SharedOptions.frequency_penalty, type: OptionType.numeric, min: -2.0, max: 2.0,
+             integer: false, step: 0.1, description: "Penalise tokens based on their frequency in the text"
+         },
+         { name: SharedOptions.stop_sequence, type: OptionType.string_list, value: [], description: "The generation will halt if one of the stop sequences is output" },
+     ]
+ };
+
+ export function getOptions(provider?: string, model?: string, options?: ModelOptions): ModelOptionsInfo {
+     switch (provider) {
+         case "bedrock":
+             return getBedrockOptions(model ?? "", options);
+         case "vertexai":
+             return getVertexAiOptions(model ?? "", options);
+         case "openai":
+             return getOpenAiOptions(model ?? "", options);
+         case "groq":
+             return getGroqOptions(model ?? "", options);
+         default:
+             return textOptionsFallback;
+     }
+ }
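The Gemini and Claude branches of getVertexAiOptions() reuse this textOptionsFallback list: they filter out the options the model does not support and prepend a max_tokens item with a model-specific upper bound. A provider that getOptions() does not yet know about could be described the same way; the builder below is a hypothetical sketch of that pattern (getAcmeOptions is not part of the package, and the root import path is assumed):

    import { ModelOptionInfoItem, ModelOptionsInfo, OptionType, SharedOptions, textOptionsFallback } from "@llumiverse/core";

    // Hypothetical builder mirroring the Gemini/Claude branches: fallback list minus
    // unsupported options, plus a bounded max_tokens item.
    function getAcmeOptions(maxTokensLimit: number): ModelOptionsInfo {
        const exclude = ["max_tokens", "top_k"];
        const common = textOptionsFallback.options.filter((o) => !exclude.includes(o.name));
        const maxTokens: ModelOptionInfoItem = {
            name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: maxTokensLimit,
            integer: true, step: 200, description: "The maximum number of tokens to generate",
        };
        return { _option_id: "text-fallback", options: [maxTokens, ...common] };
    }

    console.log(getAcmeOptions(8192).options.map((o) => o.name));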