@llumiverse/common 0.20.0 → 0.22.0-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74)
  1. package/lib/cjs/capability/azure_foundry.js +159 -0
  2. package/lib/cjs/capability/azure_foundry.js.map +1 -0
  3. package/lib/cjs/capability/bedrock.js +5 -2
  4. package/lib/cjs/capability/bedrock.js.map +1 -1
  5. package/lib/cjs/capability/openai.js +2 -0
  6. package/lib/cjs/capability/openai.js.map +1 -1
  7. package/lib/cjs/capability/vertexai.js +1 -0
  8. package/lib/cjs/capability/vertexai.js.map +1 -1
  9. package/lib/cjs/capability.js +15 -3
  10. package/lib/cjs/capability.js.map +1 -1
  11. package/lib/cjs/options/azure_foundry.js +428 -0
  12. package/lib/cjs/options/azure_foundry.js.map +1 -0
  13. package/lib/cjs/options/bedrock.js +26 -0
  14. package/lib/cjs/options/bedrock.js.map +1 -1
  15. package/lib/cjs/options/openai.js +3 -0
  16. package/lib/cjs/options/openai.js.map +1 -1
  17. package/lib/cjs/options/vertexai.js +49 -2
  18. package/lib/cjs/options/vertexai.js.map +1 -1
  19. package/lib/cjs/options.js +16 -9
  20. package/lib/cjs/options.js.map +1 -1
  21. package/lib/cjs/types.js +146 -1
  22. package/lib/cjs/types.js.map +1 -1
  23. package/lib/esm/capability/azure_foundry.js +156 -0
  24. package/lib/esm/capability/azure_foundry.js.map +1 -0
  25. package/lib/esm/capability/bedrock.js +5 -2
  26. package/lib/esm/capability/bedrock.js.map +1 -1
  27. package/lib/esm/capability/openai.js +2 -0
  28. package/lib/esm/capability/openai.js.map +1 -1
  29. package/lib/esm/capability/vertexai.js +1 -0
  30. package/lib/esm/capability/vertexai.js.map +1 -1
  31. package/lib/esm/capability.js +15 -3
  32. package/lib/esm/capability.js.map +1 -1
  33. package/lib/esm/options/azure_foundry.js +424 -0
  34. package/lib/esm/options/azure_foundry.js.map +1 -0
  35. package/lib/esm/options/bedrock.js +26 -0
  36. package/lib/esm/options/bedrock.js.map +1 -1
  37. package/lib/esm/options/openai.js +3 -0
  38. package/lib/esm/options/openai.js.map +1 -1
  39. package/lib/esm/options/vertexai.js +49 -2
  40. package/lib/esm/options/vertexai.js.map +1 -1
  41. package/lib/esm/options.js +16 -9
  42. package/lib/esm/options.js.map +1 -1
  43. package/lib/esm/types.js +142 -0
  44. package/lib/esm/types.js.map +1 -1
  45. package/lib/tsconfig.tsbuildinfo +1 -0
  46. package/lib/types/capability/azure_foundry.d.ts +7 -0
  47. package/lib/types/capability/azure_foundry.d.ts.map +1 -0
  48. package/lib/types/capability/bedrock.d.ts.map +1 -1
  49. package/lib/types/capability/openai.d.ts.map +1 -1
  50. package/lib/types/capability/vertexai.d.ts.map +1 -1
  51. package/lib/types/capability.d.ts +3 -3
  52. package/lib/types/capability.d.ts.map +1 -1
  53. package/lib/types/options/azure_foundry.d.ts +52 -0
  54. package/lib/types/options/azure_foundry.d.ts.map +1 -0
  55. package/lib/types/options/bedrock.d.ts +8 -2
  56. package/lib/types/options/bedrock.d.ts.map +1 -1
  57. package/lib/types/options/openai.d.ts.map +1 -1
  58. package/lib/types/options/vertexai.d.ts.map +1 -1
  59. package/lib/types/options.d.ts +2 -2
  60. package/lib/types/options.d.ts.map +1 -1
  61. package/lib/types/types.d.ts +60 -10
  62. package/lib/types/types.d.ts.map +1 -1
  63. package/package.json +5 -5
  64. package/src/capability/azure_foundry.ts +180 -0
  65. package/src/capability/bedrock.ts +5 -2
  66. package/src/capability/openai.ts +2 -0
  67. package/src/capability/vertexai.ts +1 -0
  68. package/src/capability.ts +18 -7
  69. package/src/options/azure_foundry.ts +488 -0
  70. package/src/options/bedrock.ts +36 -2
  71. package/src/options/openai.ts +3 -0
  72. package/src/options/vertexai.ts +53 -6
  73. package/src/options.ts +17 -11
  74. package/src/types.ts +193 -17
@@ -0,0 +1,488 @@
1
+ import { ModelOptionsInfo, ModelOptionInfoItem, ModelOptions, OptionType, SharedOptions } from "../types.js";
2
+
3
+ // Helper function to parse composite model IDs
4
+ function parseAzureFoundryModelId(compositeId: string): { deploymentName: string; baseModel: string } {
5
+ const parts = compositeId.split('::');
6
+ if (parts.length === 2) {
7
+ return {
8
+ deploymentName: parts[0],
9
+ baseModel: parts[1]
10
+ };
11
+ }
12
+
13
+ // Backwards compatibility: if no delimiter found, treat as deployment name
14
+ return {
15
+ deploymentName: compositeId,
16
+ baseModel: compositeId
17
+ };
18
+ }
19
+
20
// Union type of all Azure Foundry options; discriminated on `_option_id`.
export type AzureFoundryOptions = AzureFoundryOpenAIOptions | AzureFoundryDeepSeekOptions | AzureFoundryThinkingOptions | AzureFoundryTextOptions | AzureFoundryImageOptions;

/**
 * Options for Azure-hosted OpenAI chat models (matched on "gpt-" base models).
 */
export interface AzureFoundryOpenAIOptions {
    _option_id: "azure-foundry-openai";
    max_tokens?: number;
    temperature?: number;
    top_p?: number;
    presence_penalty?: number;
    frequency_penalty?: number;
    stop_sequence?: string[];
    // Only surfaced for vision-capable models.
    image_detail?: "low" | "high" | "auto";
    reasoning_effort?: "low" | "medium" | "high";
}

/**
 * Options for DeepSeek R1 models (no penalty/seed knobs are exposed).
 */
export interface AzureFoundryDeepSeekOptions {
    _option_id: "azure-foundry-deepseek";
    max_tokens?: number;
    temperature?: number;
    top_p?: number;
    stop_sequence?: string[];
}

/**
 * Options for O-series reasoning models (o1 / o3 / o4).
 */
export interface AzureFoundryThinkingOptions {
    _option_id: "azure-foundry-thinking";
    max_tokens?: number;
    temperature?: number;
    top_p?: number;
    stop_sequence?: string[];
    // Only offered for o3 and the full (non-mini, non-preview) o1.
    reasoning_effort?: "low" | "medium" | "high";
    image_detail?: "low" | "high" | "auto";
}

/**
 * Options for general text models (Claude, Llama, Mistral, Phi, Jamba,
 * Cohere, Grok, ...) — the fallback when no more specific family matches.
 */
export interface AzureFoundryTextOptions {
    _option_id: "azure-foundry-text";
    max_tokens?: number;
    temperature?: number;
    top_p?: number;
    top_k?: number;
    presence_penalty?: number;
    frequency_penalty?: number;
    stop_sequence?: string[];
    seed?: number;
}

/**
 * Options for image generation models (DALL-E / gpt-image).
 */
export interface AzureFoundryImageOptions {
    _option_id: "azure-foundry-image";
    width?: number;
    height?: number;
    quality?: "standard" | "hd";
    style?: "vivid" | "natural";
    response_format?: "url" | "b64_json";
    size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
}
74
+
75
+ export function getMaxTokensLimitAzureFoundry(model: string): number | undefined {
76
+ // Extract base model from composite ID (deployment::baseModel)
77
+ const { baseModel } = parseAzureFoundryModelId(model);
78
+ const modelLower = baseModel.toLowerCase();
79
+ // GPT models
80
+ if (modelLower.includes("gpt-4o")) {
81
+ if (modelLower.includes("mini")) {
82
+ return 16384;
83
+ }
84
+ return 16384;
85
+ }
86
+ if (modelLower.includes("gpt-4")) {
87
+ if (modelLower.includes("turbo")) {
88
+ return 4096;
89
+ }
90
+ if (modelLower.includes("32k")) {
91
+ return 32768;
92
+ }
93
+ return 8192;
94
+ }
95
+ if (modelLower.includes("gpt-35") || modelLower.includes("gpt-3.5")) {
96
+ return 4096;
97
+ }
98
+ if (model.includes("gpt-5")) {
99
+ return 128000;
100
+ }
101
+ // O-series models
102
+ if (modelLower.includes("o1")) {
103
+ if (modelLower.includes("preview")) {
104
+ return 32768;
105
+ }
106
+ if (modelLower.includes("mini")) {
107
+ return 65536;
108
+ }
109
+ return 100000;
110
+ }
111
+ if (modelLower.includes("o3")) {
112
+ if (modelLower.includes("mini")) {
113
+ return 100000;
114
+ }
115
+ return 100000;
116
+ }
117
+ if (modelLower.includes("o4")) {
118
+ return 100000;
119
+ }
120
+ // DeepSeek models
121
+ if (modelLower.includes("deepseek")) {
122
+ if (modelLower.includes("r1")) {
123
+ return 163840;
124
+ }
125
+ if (modelLower.includes("v3")) {
126
+ return 131072;
127
+ }
128
+ }
129
+ // Claude models
130
+ if (modelLower.includes("claude")) {
131
+ if (modelLower.includes("3-5") || modelLower.includes("3-7")) {
132
+ return 8192;
133
+ }
134
+ if (modelLower.includes("3")) {
135
+ return 4096;
136
+ }
137
+ return 4096;
138
+ }
139
+ // Llama models
140
+ if (modelLower.includes("llama")) {
141
+ if (modelLower.includes("3.1") || modelLower.includes("3.3")) {
142
+ return 8192;
143
+ }
144
+ if (modelLower.includes("4")) {
145
+ return 1000000; // 1M context
146
+ }
147
+ return 8192;
148
+ }
149
+ // Mistral models
150
+ if (modelLower.includes("mistral")) {
151
+ if (modelLower.includes("large")) {
152
+ return 4096;
153
+ }
154
+ if (modelLower.includes("small")) {
155
+ return 4096;
156
+ }
157
+ return 4096;
158
+ }
159
+ // Phi models
160
+ if (modelLower.includes("phi")) {
161
+ return 4096;
162
+ }
163
+ // AI21 Jamba models
164
+ if (modelLower.includes("jamba")) {
165
+ return 4096;
166
+ }
167
+ // Cohere models
168
+ if (modelLower.includes("cohere")) {
169
+ if (modelLower.includes("command-a")) {
170
+ return 8000;
171
+ }
172
+ return 4096;
173
+ }
174
+ // Grok models
175
+ if (modelLower.includes("grok")) {
176
+ return 131072;
177
+ }
178
+ return undefined;
179
+ }
180
+
181
/**
 * Builds the option descriptors (ModelOptionsInfo) for a given Azure Foundry
 * model. Dispatches on the base model name: image models, O-series reasoning
 * models, DeepSeek R1, OpenAI GPT chat models, then a generic text fallback.
 *
 * @param model   Composite model ID ("deploymentName::baseModel") or a plain
 *                deployment name; family detection uses the base model.
 * @param _option Currently unused; kept for signature parity with the other
 *                provider option builders.
 */
export function getAzureFoundryOptions(model: string, _option?: ModelOptions): ModelOptionsInfo {
    // Extract base model from composite ID (deployment::baseModel)
    const { baseModel } = parseAzureFoundryModelId(model);
    const modelLower = baseModel.toLowerCase();
    // NOTE(review): the limit is computed from the full composite ID;
    // getMaxTokensLimitAzureFoundry parses out the base model again itself.
    const max_tokens_limit = getMaxTokensLimitAzureFoundry(model);
    // Image generation models
    if (modelLower.includes("dall-e") || modelLower.includes("gpt-image")) {
        return {
            _option_id: "azure-foundry-image",
            options: [
                {
                    name: "size",
                    type: OptionType.enum,
                    enum: {
                        "256x256": "256x256",
                        "512x512": "512x512",
                        "1024x1024": "1024x1024",
                        "1792x1024": "1792x1024",
                        "1024x1792": "1024x1792"
                    },
                    default: "1024x1024",
                    description: "The size of the generated image"
                },
                {
                    name: "quality",
                    type: OptionType.enum,
                    enum: { "Standard": "standard", "HD": "hd" },
                    default: "standard",
                    description: "The quality of the generated image"
                },
                {
                    name: "style",
                    type: OptionType.enum,
                    enum: { "Vivid": "vivid", "Natural": "natural" },
                    default: "vivid",
                    description: "The style of the generated image"
                },
                {
                    name: "response_format",
                    type: OptionType.enum,
                    enum: { "URL": "url", "Base64 JSON": "b64_json" },
                    default: "url",
                    description: "The format of the response"
                }
            ]
        };
    }
    // Vision model options — appended to whichever text branch is taken below.
    const visionOptions: ModelOptionInfoItem[] = isVisionModel(modelLower) ? [
        {
            name: "image_detail",
            type: OptionType.enum,
            enum: { "Low": "low", "High": "high", "Auto": "auto" },
            default: "auto",
            description: "Controls how the model processes input images"
        }
    ] : [];
    // O-series and thinking models. reasoning_effort is only offered for o3
    // and the full o1 (not o1-mini / o1-preview).
    if (modelLower.includes("o1") || modelLower.includes("o3") || modelLower.includes("o4")) {
        const reasoningOptions: ModelOptionInfoItem[] = (modelLower.includes("o3") || isO1Full(modelLower)) ? [
            {
                name: "reasoning_effort",
                type: OptionType.enum,
                enum: { "Low": "low", "Medium": "medium", "High": "high" },
                default: "medium",
                description: "How much effort the model should put into reasoning"
            }
        ] : [];
        return {
            _option_id: "azure-foundry-thinking",
            options: [
                {
                    name: SharedOptions.max_tokens,
                    type: OptionType.numeric,
                    min: 1,
                    max: max_tokens_limit,
                    integer: true,
                    description: "The maximum number of tokens to generate"
                },
                {
                    name: SharedOptions.temperature,
                    type: OptionType.numeric,
                    min: 0.0,
                    max: 2.0,
                    default: 1.0,
                    step: 0.1,
                    description: "Controls randomness in the output"
                },
                {
                    name: SharedOptions.top_p,
                    type: OptionType.numeric,
                    min: 0,
                    max: 1,
                    step: 0.1,
                    description: "Nucleus sampling parameter"
                },
                {
                    name: SharedOptions.stop_sequence,
                    type: OptionType.string_list,
                    value: [],
                    description: "Sequences where the model will stop generating"
                },
                ...reasoningOptions,
                ...visionOptions
            ]
        };
    }
    // DeepSeek R1 models (other DeepSeek variants fall through to the text fallback)
    if (modelLower.includes("deepseek") && modelLower.includes("r1")) {
        return {
            _option_id: "azure-foundry-deepseek",
            options: [
                {
                    name: SharedOptions.max_tokens,
                    type: OptionType.numeric,
                    min: 1,
                    max: max_tokens_limit,
                    integer: true,
                    description: "The maximum number of tokens to generate"
                },
                {
                    name: SharedOptions.temperature,
                    type: OptionType.numeric,
                    min: 0.0,
                    max: 2.0,
                    default: 0.7,
                    step: 0.1,
                    description: "Lower temperatures recommended for DeepSeek R1 (0.3-0.7)"
                },
                {
                    name: SharedOptions.top_p,
                    type: OptionType.numeric,
                    min: 0,
                    max: 1,
                    step: 0.1,
                    description: "Nucleus sampling parameter"
                },
                {
                    name: SharedOptions.stop_sequence,
                    type: OptionType.string_list,
                    value: [],
                    description: "Sequences where the model will stop generating"
                }
            ]
        };
    }
    // OpenAI models (GPT-4, GPT-4o, GPT-3.5). GPT-5 also lands here via the
    // generic "gpt-" match.
    if (modelLower.includes("gpt-")) {
        return {
            _option_id: "azure-foundry-openai",
            options: [
                {
                    name: SharedOptions.max_tokens,
                    type: OptionType.numeric,
                    min: 1,
                    max: max_tokens_limit,
                    integer: true,
                    step: 200,
                    description: "The maximum number of tokens to generate"
                },
                {
                    name: SharedOptions.temperature,
                    type: OptionType.numeric,
                    min: 0.0,
                    max: 2.0,
                    default: 0.7,
                    step: 0.1,
                    description: "Controls randomness in the output"
                },
                {
                    name: SharedOptions.top_p,
                    type: OptionType.numeric,
                    min: 0,
                    max: 1,
                    step: 0.1,
                    description: "Nucleus sampling parameter"
                },
                {
                    name: SharedOptions.presence_penalty,
                    type: OptionType.numeric,
                    min: -2.0,
                    max: 2.0,
                    step: 0.1,
                    description: "Penalize new tokens based on their presence in the text"
                },
                {
                    name: SharedOptions.frequency_penalty,
                    type: OptionType.numeric,
                    min: -2.0,
                    max: 2.0,
                    step: 0.1,
                    description: "Penalize new tokens based on their frequency in the text"
                },
                {
                    name: SharedOptions.stop_sequence,
                    type: OptionType.string_list,
                    value: [],
                    description: "Sequences where the model will stop generating"
                },
                ...visionOptions
            ]
        };
    }
    // General text models (Claude, Llama, Mistral, Phi, etc.)
    const baseOptions: ModelOptionInfoItem[] = [
        {
            name: SharedOptions.max_tokens,
            type: OptionType.numeric,
            min: 1,
            max: max_tokens_limit,
            integer: true,
            step: 200,
            description: "The maximum number of tokens to generate"
        },
        {
            name: SharedOptions.temperature,
            type: OptionType.numeric,
            min: 0.0,
            max: 2.0,
            default: 0.7,
            step: 0.1,
            description: "Controls randomness in the output"
        },
        {
            name: SharedOptions.top_p,
            type: OptionType.numeric,
            min: 0,
            max: 1,
            step: 0.1,
            description: "Nucleus sampling parameter"
        },
        {
            name: SharedOptions.stop_sequence,
            type: OptionType.string_list,
            value: [],
            description: "Sequences where the model will stop generating"
        }
    ];
    // Add model-specific options
    const additionalOptions: ModelOptionInfoItem[] = [];
    // Add top_k for certain models
    if (modelLower.includes("claude") || modelLower.includes("mistral") || modelLower.includes("phi")) {
        additionalOptions.push({
            name: SharedOptions.top_k,
            type: OptionType.numeric,
            min: 1,
            integer: true,
            step: 1,
            description: "Limits token sampling to the top k tokens"
        });
    }

    // Add penalty options for certain models
    if (modelLower.includes("claude") || modelLower.includes("jamba") || modelLower.includes("cohere")) {
        additionalOptions.push(
            {
                name: SharedOptions.presence_penalty,
                type: OptionType.numeric,
                min: -2.0,
                max: 2.0,
                step: 0.1,
                description: "Penalize new tokens based on their presence in the text"
            },
            {
                name: SharedOptions.frequency_penalty,
                type: OptionType.numeric,
                min: -2.0,
                max: 2.0,
                step: 0.1,
                description: "Penalize new tokens based on their frequency in the text"
            }
        );
    }
    // Add seed option for certain models
    // NOTE(review): "gemini" appears here and in isVisionModel — confirm
    // Gemini models are actually reachable through this Azure Foundry catalog.
    if (modelLower.includes("mistral") || modelLower.includes("phi") || modelLower.includes("gemini")) {
        additionalOptions.push({
            name: SharedOptions.seed,
            type: OptionType.numeric,
            integer: true,
            description: "Random seed for reproducible generation"
        });
    }
    return {
        _option_id: "azure-foundry-text",
        options: [
            ...baseOptions,
            ...additionalOptions,
            ...visionOptions
        ]
    };
}
472
+
473
+ function isVisionModel(modelLower: string): boolean {
474
+ return modelLower.includes("gpt-4o") ||
475
+ modelLower.includes("gpt-4-turbo") ||
476
+ modelLower.includes("claude-3") ||
477
+ modelLower.includes("llama-3.2") ||
478
+ modelLower.includes("llama-4") ||
479
+ modelLower.includes("gemini") ||
480
+ isO1Full(modelLower);
481
+ }
482
+
483
+ function isO1Full(modelLower: string): boolean {
484
+ if (modelLower.includes("o1")) {
485
+ return !modelLower.includes("mini") && !modelLower.includes("preview");
486
+ }
487
+ return false;
488
+ }
@@ -2,7 +2,7 @@ import { ModelOptionsInfo, ModelOptions, OptionType, ModelOptionInfoItem } from
2
2
  import { textOptionsFallback } from "./fallback.js";
3
3
 
4
4
  // Union type of all Bedrock options
5
- export type BedrockOptions = NovaCanvasOptions | BaseConverseOptions | BedrockClaudeOptions | BedrockPalmyraOptions;
5
+ export type BedrockOptions = NovaCanvasOptions | BaseConverseOptions | BedrockClaudeOptions | BedrockPalmyraOptions | BedrockGptOssOptions;
6
6
 
7
7
  export interface NovaCanvasOptions {
8
8
  _option_id: "bedrock-nova-canvas"
@@ -21,7 +21,7 @@ export interface NovaCanvasOptions {
21
21
  }
22
22
 
23
23
  export interface BaseConverseOptions {
24
- _option_id: "bedrock-converse" | "bedrock-claude" | "bedrock-nova" | "bedrock-mistral" | "bedrock-ai21" | "bedrock-cohere-command" | "bedrock-palmyra";
24
+ _option_id: "bedrock-converse" | "bedrock-claude" | "bedrock-nova" | "bedrock-mistral" | "bedrock-ai21" | "bedrock-cohere-command" | "bedrock-palmyra" | "bedrock-gpt-oss";
25
25
  max_tokens?: number;
26
26
  temperature?: number;
27
27
  top_p?: number;
@@ -44,6 +44,13 @@ export interface BedrockPalmyraOptions extends BaseConverseOptions {
44
44
  presence_penalty?: number;
45
45
  }
46
46
 
47
/**
 * Options for OpenAI gpt-oss models on Bedrock. Extends the shared Converse
 * options with a reasoning-effort knob and the OpenAI-style penalties.
 */
export interface BedrockGptOssOptions extends BaseConverseOptions {
    _option_id: "bedrock-gpt-oss";
    reasoning_effort?: "low" | "medium" | "high";
    frequency_penalty?: number;
    presence_penalty?: number;
}
53
+
47
54
  export function getMaxTokensLimitBedrock(model: string): number | undefined {
48
55
  // Claude models
49
56
  if (model.includes("claude")) {
@@ -124,6 +131,10 @@ export function getMaxTokensLimitBedrock(model: string): number | undefined {
124
131
  return 8192;
125
132
  }
126
133
  }
134
+ // OpenAI gpt-oss models
135
+ if (model.includes("gpt-oss")) {
136
+ return 128000;
137
+ }
127
138
 
128
139
  // Default fallback
129
140
  return undefined;
@@ -442,6 +453,29 @@ export function getBedrockOptions(model: string, option?: ModelOptions): ModelOp
442
453
  options: [...baseConverseOptions, ...palmyraConverseOptions]
443
454
  }
444
455
  }
456
+ else if (model.includes("gpt-oss")) {
457
+ const gptOssOptions: ModelOptionInfoItem[] = [
458
+ {
459
+ name: "reasoning_effort",
460
+ type: OptionType.enum,
461
+ enum: {
462
+ "low": "low",
463
+ "medium": "medium",
464
+ "high": "high"
465
+ },
466
+ default: "medium",
467
+ description: "The reasoning effort of the model, which affects the quality and speed of the response"
468
+ },
469
+ ];
470
+
471
+ const baseConverseOptionsNoStop: ModelOptionInfoItem[] = [...baseConverseOptions];
472
+ // Remove stop_sequence for gpt-oss
473
+ baseConverseOptionsNoStop.splice(baseConverseOptionsNoStop.findIndex(o => o.name === "stop_sequence"), 1);
474
+ return {
475
+ _option_id: "bedrock-gpt-oss",
476
+ options: [...baseConverseOptionsNoStop, ...gptOssOptions]
477
+ };
478
+ }
445
479
 
446
480
  //Fallback to converse standard.
447
481
  return {
@@ -93,6 +93,9 @@ export function getOpenAiOptions(model: string, _option?: ModelOptions): ModelOp
93
93
  else if (model.includes("gpt-3-5")) {
94
94
  max_tokens_limit = 4096;
95
95
  }
96
+ else if (model.includes("gpt-5")) {
97
+ max_tokens_limit = 128000;
98
+ }
96
99
 
97
100
  //Is non-thinking text model
98
101
  const commonOptions: ModelOptionInfoItem[] = [
@@ -245,9 +245,53 @@ function getImagenOptions(model: string, option?: ModelOptions): ModelOptionsInf
245
245
  }
246
246
 
247
247
  function getGeminiOptions(model: string, _option?: ModelOptions): ModelOptionsInfo {
248
+ // Special handling for gemini-2.5-flash-image
249
+ if (model.includes("gemini-2.5-flash-image")) {
250
+ const options: ModelOptionInfoItem[] = [
251
+ {
252
+ name: SharedOptions.temperature,
253
+ type: OptionType.numeric,
254
+ min: 0.0,
255
+ max: 2.0,
256
+ default: 0.7,
257
+ step: 0.01,
258
+ description: "Sampling temperature"
259
+ },
260
+ {
261
+ name: SharedOptions.top_p,
262
+ type: OptionType.numeric,
263
+ min: 0.0,
264
+ max: 1.0,
265
+ step: 0.01,
266
+ description: "Nucleus sampling probability"
267
+ },
268
+ {
269
+ name: "candidate_count",
270
+ type: OptionType.numeric,
271
+ min: 1,
272
+ max: 8,
273
+ default: 1,
274
+ integer: true,
275
+ description: "Number of candidates to generate"
276
+ },
277
+ {
278
+ name: SharedOptions.max_tokens,
279
+ type: OptionType.numeric,
280
+ min: 1,
281
+ max: 32768,
282
+ integer: true,
283
+ step: 200,
284
+ description: "Maximum output tokens"
285
+ }
286
+ ];
287
+ return {
288
+ _option_id: "vertexai-gemini",
289
+ options
290
+ };
291
+ }
248
292
  const max_tokens_limit = getGeminiMaxTokensLimit(model);
249
293
  const excludeOptions = ["max_tokens"];
250
- let commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
294
+ const commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
251
295
 
252
296
  const max_tokens: ModelOptionInfoItem[] = [{
253
297
  name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
@@ -260,7 +304,7 @@ function getGeminiOptions(model: string, _option?: ModelOptions): ModelOptionsIn
260
304
 
261
305
  if (model.includes("-2.5-")) {
262
306
  // Gemini 2.5 thinking models
263
-
307
+
264
308
  // Set budget token ranges based on model variant
265
309
  let budgetMin = -1;
266
310
  let budgetMax = 24576;
@@ -287,7 +331,7 @@ function getGeminiOptions(model: string, _option?: ModelOptions): ModelOptionsIn
287
331
  "Range: 128-32768 tokens. " +
288
332
  "Cannot disable thinking - minimum 128 tokens. Set to -1 for dynamic thinking.";
289
333
  }
290
-
334
+
291
335
  const geminiThinkingOptions: ModelOptionInfoItem[] = [
292
336
  {
293
337
  name: "include_thoughts",
@@ -331,7 +375,7 @@ function getGeminiOptions(model: string, _option?: ModelOptions): ModelOptionsIn
331
375
  function getClaudeOptions(model: string, option?: ModelOptions): ModelOptionsInfo {
332
376
  const max_tokens_limit = getClaudeMaxTokensLimit(model);
333
377
  const excludeOptions = ["max_tokens", "presence_penalty", "frequency_penalty"];
334
- let commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
378
+ const commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
335
379
  const max_tokens: ModelOptionInfoItem[] = [{
336
380
  name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
337
381
  integer: true, step: 200, description: "The maximum number of tokens to generate"
@@ -391,7 +435,7 @@ function getLlamaOptions(model: string): ModelOptionsInfo {
391
435
  name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
392
436
  integer: true, step: 200, description: "The maximum number of tokens to generate"
393
437
  }];
394
-
438
+
395
439
  // Set max temperature to 1.0 for Llama models
396
440
  commonOptions = commonOptions.map((option) => {
397
441
  if (
@@ -416,6 +460,9 @@ function getLlamaOptions(model: string): ModelOptionsInfo {
416
460
  }
417
461
 
418
462
  function getGeminiMaxTokensLimit(model: string): number {
463
+ if (model.includes("gemini-2.5-flash-image")) {
464
+ return 32768;
465
+ }
419
466
  if (model.includes("thinking") || model.includes("-2.5-")) {
420
467
  return 65536;
421
468
  }
@@ -427,7 +474,7 @@ function getGeminiMaxTokensLimit(model: string): number {
427
474
 
428
475
  function getClaudeMaxTokensLimit(model: string): number {
429
476
  if (model.includes("-4-")) {
430
- if(model.includes("opus-")) {
477
+ if (model.includes("opus-")) {
431
478
  return 32768;
432
479
  }
433
480
  return 65536;