@llumiverse/core 0.17.0 → 0.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140)
  1. package/README.md +1 -1
  2. package/lib/cjs/CompletionStream.js.map +1 -1
  3. package/lib/cjs/Driver.js +6 -6
  4. package/lib/cjs/Driver.js.map +1 -1
  5. package/lib/cjs/async.js +3 -3
  6. package/lib/cjs/async.js.map +1 -1
  7. package/lib/cjs/capability/bedrock.js +183 -0
  8. package/lib/cjs/capability/bedrock.js.map +1 -0
  9. package/lib/cjs/capability/openai.js +122 -0
  10. package/lib/cjs/capability/openai.js.map +1 -0
  11. package/lib/cjs/capability/vertexai.js +86 -0
  12. package/lib/cjs/capability/vertexai.js.map +1 -0
  13. package/lib/cjs/capability.js +52 -0
  14. package/lib/cjs/capability.js.map +1 -0
  15. package/lib/cjs/formatters/generic.js +6 -6
  16. package/lib/cjs/formatters/generic.js.map +1 -1
  17. package/lib/cjs/formatters/index.js.map +1 -1
  18. package/lib/cjs/formatters/nova.js +11 -11
  19. package/lib/cjs/formatters/nova.js.map +1 -1
  20. package/lib/cjs/formatters/openai.js +25 -11
  21. package/lib/cjs/formatters/openai.js.map +1 -1
  22. package/lib/cjs/index.js +2 -1
  23. package/lib/cjs/index.js.map +1 -1
  24. package/lib/cjs/json.js +1 -1
  25. package/lib/cjs/json.js.map +1 -1
  26. package/lib/cjs/options.js +8 -43
  27. package/lib/cjs/options.js.map +1 -1
  28. package/lib/cjs/resolver.js +2 -2
  29. package/lib/esm/CompletionStream.js.map +1 -1
  30. package/lib/esm/Driver.js +3 -3
  31. package/lib/esm/Driver.js.map +1 -1
  32. package/lib/esm/async.js +3 -3
  33. package/lib/esm/async.js.map +1 -1
  34. package/lib/esm/capability/bedrock.js +180 -0
  35. package/lib/esm/capability/bedrock.js.map +1 -0
  36. package/lib/esm/capability/openai.js +119 -0
  37. package/lib/esm/capability/openai.js.map +1 -0
  38. package/lib/esm/capability/vertexai.js +83 -0
  39. package/lib/esm/capability/vertexai.js.map +1 -0
  40. package/lib/esm/capability.js +47 -0
  41. package/lib/esm/capability.js.map +1 -0
  42. package/lib/esm/formatters/generic.js +1 -1
  43. package/lib/esm/formatters/generic.js.map +1 -1
  44. package/lib/esm/formatters/index.js.map +1 -1
  45. package/lib/esm/formatters/nova.js +5 -5
  46. package/lib/esm/formatters/nova.js.map +1 -1
  47. package/lib/esm/formatters/openai.js +17 -4
  48. package/lib/esm/formatters/openai.js.map +1 -1
  49. package/lib/esm/index.js +2 -1
  50. package/lib/esm/index.js.map +1 -1
  51. package/lib/esm/json.js +1 -1
  52. package/lib/esm/json.js.map +1 -1
  53. package/lib/esm/options.js +3 -37
  54. package/lib/esm/options.js.map +1 -1
  55. package/lib/esm/resolver.js +2 -2
  56. package/lib/types/CompletionStream.d.ts +1 -1
  57. package/lib/types/CompletionStream.d.ts.map +1 -1
  58. package/lib/types/Driver.d.ts +3 -3
  59. package/lib/types/Driver.d.ts.map +1 -1
  60. package/lib/types/async.d.ts +2 -2
  61. package/lib/types/async.d.ts.map +1 -1
  62. package/lib/types/capability/bedrock.d.ts +7 -0
  63. package/lib/types/capability/bedrock.d.ts.map +1 -0
  64. package/lib/types/capability/openai.d.ts +11 -0
  65. package/lib/types/capability/openai.d.ts.map +1 -0
  66. package/lib/types/capability/vertexai.d.ts +11 -0
  67. package/lib/types/capability/vertexai.d.ts.map +1 -0
  68. package/lib/types/capability.d.ts +5 -0
  69. package/lib/types/capability.d.ts.map +1 -0
  70. package/lib/types/formatters/commons.d.ts +1 -1
  71. package/lib/types/formatters/commons.d.ts.map +1 -1
  72. package/lib/types/formatters/generic.d.ts +2 -2
  73. package/lib/types/formatters/generic.d.ts.map +1 -1
  74. package/lib/types/formatters/index.d.ts +0 -3
  75. package/lib/types/formatters/index.d.ts.map +1 -1
  76. package/lib/types/formatters/nova.d.ts +2 -2
  77. package/lib/types/formatters/nova.d.ts.map +1 -1
  78. package/lib/types/formatters/openai.d.ts +3 -2
  79. package/lib/types/formatters/openai.d.ts.map +1 -1
  80. package/lib/types/index.d.ts +2 -1
  81. package/lib/types/index.d.ts.map +1 -1
  82. package/lib/types/json.d.ts +1 -7
  83. package/lib/types/json.d.ts.map +1 -1
  84. package/lib/types/options.d.ts +2 -13
  85. package/lib/types/options.d.ts.map +1 -1
  86. package/lib/types/validation.d.ts +1 -1
  87. package/lib/types/validation.d.ts.map +1 -1
  88. package/package.json +3 -2
  89. package/src/CompletionStream.ts +5 -5
  90. package/src/Driver.ts +5 -5
  91. package/src/async.ts +5 -8
  92. package/src/capability/bedrock.ts +187 -0
  93. package/src/capability/openai.ts +124 -0
  94. package/src/capability/vertexai.ts +88 -0
  95. package/src/capability.ts +49 -0
  96. package/src/formatters/commons.ts +1 -1
  97. package/src/formatters/generic.ts +2 -2
  98. package/src/formatters/index.ts +0 -5
  99. package/src/formatters/nova.ts +6 -6
  100. package/src/formatters/openai.ts +19 -5
  101. package/src/index.ts +3 -2
  102. package/src/json.ts +2 -10
  103. package/src/options.ts +12 -50
  104. package/src/resolver.ts +2 -2
  105. package/src/validation.ts +3 -3
  106. package/lib/cjs/options/bedrock.js +0 -343
  107. package/lib/cjs/options/bedrock.js.map +0 -1
  108. package/lib/cjs/options/groq.js +0 -37
  109. package/lib/cjs/options/groq.js.map +0 -1
  110. package/lib/cjs/options/openai.js +0 -123
  111. package/lib/cjs/options/openai.js.map +0 -1
  112. package/lib/cjs/options/vertexai.js +0 -257
  113. package/lib/cjs/options/vertexai.js.map +0 -1
  114. package/lib/cjs/types.js +0 -80
  115. package/lib/cjs/types.js.map +0 -1
  116. package/lib/esm/options/bedrock.js +0 -340
  117. package/lib/esm/options/bedrock.js.map +0 -1
  118. package/lib/esm/options/groq.js +0 -34
  119. package/lib/esm/options/groq.js.map +0 -1
  120. package/lib/esm/options/openai.js +0 -120
  121. package/lib/esm/options/openai.js.map +0 -1
  122. package/lib/esm/options/vertexai.js +0 -253
  123. package/lib/esm/options/vertexai.js.map +0 -1
  124. package/lib/esm/types.js +0 -77
  125. package/lib/esm/types.js.map +0 -1
  126. package/lib/types/options/bedrock.d.ts +0 -32
  127. package/lib/types/options/bedrock.d.ts.map +0 -1
  128. package/lib/types/options/groq.d.ts +0 -12
  129. package/lib/types/options/groq.d.ts.map +0 -1
  130. package/lib/types/options/openai.d.ts +0 -21
  131. package/lib/types/options/openai.d.ts.map +0 -1
  132. package/lib/types/options/vertexai.d.ts +0 -52
  133. package/lib/types/options/vertexai.d.ts.map +0 -1
  134. package/lib/types/types.d.ts +0 -323
  135. package/lib/types/types.d.ts.map +0 -1
  136. package/src/options/bedrock.ts +0 -388
  137. package/src/options/groq.ts +0 -47
  138. package/src/options/openai.ts +0 -148
  139. package/src/options/vertexai.ts +0 -312
  140. package/src/types.ts +0 -405
package/src/options/bedrock.ts
@@ -1,388 +0,0 @@
- import { ModelOptionsInfo, ModelOptions, OptionType, ModelOptionInfoItem } from "../types.js";
- import { textOptionsFallback } from "../options.js";
-
- // Union type of all Bedrock options
- export type BedrockOptions = NovaCanvasOptions | BaseConverseOptions | BedrockClaudeOptions;
-
- export interface NovaCanvasOptions {
-     _option_id: "bedrock-nova-canvas"
-     taskType: "TEXT_IMAGE" | "TEXT_IMAGE_WITH_IMAGE_CONDITIONING" | "COLOR_GUIDED_GENERATION" | "IMAGE_VARIATION" | "INPAINTING" | "OUTPAINTING" | "BACKGROUND_REMOVAL";
-     width?: number;
-     height?: number;
-     quality?: "standard" | "premium";
-     cfgScale?: number;
-     seed?: number;
-     numberOfImages?: number;
-     controlMode?: "CANNY_EDGE" | "SEGMENTATION";
-     controlStrength?: number;
-     colors?: string[];
-     similarityStrength?: number;
-     outPaintingMode?: "DEFAULT" | "PRECISE";
- }
-
- export interface BaseConverseOptions {
-     _option_id: "bedrock-converse" | "bedrock-claude" | "bedrock-nova" | "bedrock-mistral" | "bedrock-ai21" | "bedrock-cohere-command";
-     max_tokens?: number;
-     temperature?: number;
-     top_p?: number;
-     stop_sequence?: string[];
- }
-
- export interface BedrockClaudeOptions extends BaseConverseOptions {
-     _option_id: "bedrock-claude";
-     top_k?: number;
-     thinking_mode?: boolean;
-     thinking_budget_tokens?: number;
- }
-
- function getMaxTokensLimit(model: string, option?: ModelOptions): number | undefined {
-     // Claude models
-     if (model.includes("claude")) {
-         if (model.includes("3-7")) {
-             if (option && (option as BedrockClaudeOptions)?.thinking_mode) {
-                 return 128000;
-             } else {
-                 return 8192;
-             }
-         }
-         else if (model.includes("3-5")) {
-             if (model.includes("claude-3-5-sonnet")) {
-                 return 4096;
-             }
-             return 8192;
-         }
-         else {
-             return 4096;
-         }
-     }
-     // Amazon models
-     else if (model.includes("amazon")) {
-         if (model.includes("titan")) {
-             if (model.includes("lite")) {
-                 return 4096;
-             } else if (model.includes("express")) {
-                 return 8192;
-             } else if (model.includes("premier")) {
-                 return 3072;
-             }
-
-         }
-         else if (model.includes("nova")) {
-             return 5000;
-         }
-     }
-     // Mistral models
-     else if (model.includes("mistral")) {
-         if (model.includes("8x7b")) {
-             return 4096;
-         }
-         return 8192;
-     }
-     // AI21 models
-     else if (model.includes("ai21")) {
-         if (model.includes("j2")) {
-             if (model.includes("large") || model.includes("mid") || model.includes("ultra")) {
-                 return 8191;
-             }
-             return 2048;
-         }
-         if (model.includes("jamba")) {
-             return 4096;
-         }
-     }
-     // Cohere models
-     else if (model.includes("cohere.command")) {
-         if (model.includes("command-r")) {
-             return 128000;
-         }
-         return 4096;
-     }
-     // Meta models
-     else if (model.includes("llama")) {
-         if (model.includes("3-70b") || model.includes("3-8b")) {
-             return 2048;
-         }
-         return 8192;
-     }
-
-     // Default fallback
-     return undefined;
- }
-
- export function getBedrockOptions(model: string, option?: ModelOptions): ModelOptionsInfo {
-     if (model.includes("canvas")) {
-         const tasktypeList: ModelOptionInfoItem = {
-             name: "taskType",
-             type: OptionType.enum,
-             enum: {
-                 "Text-To-Image": "TEXT_IMAGE",
-                 "Text-To-Image-with-Image-Conditioning": "TEXT_IMAGE_WITH_IMAGE_CONDITIONING",
-                 "Color-Guided-Generation": "COLOR_GUIDED_GENERATION",
-                 "Image-Variation": "IMAGE_VARIATION",
-                 "Inpainting": "INPAINTING",
-                 "Outpainting": "OUTPAINTING",
-                 "Background-Removal": "BACKGROUND_REMOVAL",
-             },
-             default: "TEXT_IMAGE",
-             description: "The type of task to perform",
-             refresh: true,
-         };
-
-         let otherOptions: ModelOptionInfoItem[] = [
-             { name: "width", type: OptionType.numeric, min: 320, max: 4096, default: 512, step: 16, integer: true, description: "The width of the generated image" },
-             { name: "height", type: OptionType.numeric, min: 320, max: 4096, default: 512, step: 16, integer: true, description: "The height of the generated image" },
-             {
-                 name: "quality",
-                 type: OptionType.enum,
-                 enum: { "standard": "standard", "premium": "premium" },
-                 default: "standard",
-                 description: "The quality of the generated image"
-             },
-             { name: "cfgScale", type: OptionType.numeric, min: 1.1, max: 10.0, default: 6.5, step: 0.1, integer: false, description: "The scale of the generated image" },
-             { name: "seed", type: OptionType.numeric, min: 0, max: 858993459, default: 12, integer: true, description: "The seed of the generated image" },
-             { name: "numberOfImages", type: OptionType.numeric, min: 1, max: 5, default: 1, integer: true, description: "The number of images to generate" },
-         ];
-
-         let dependentOptions: ModelOptionInfoItem[] = [];
-
-         switch ((option as NovaCanvasOptions)?.taskType ?? "TEXT_IMAGE") {
-             case "TEXT_IMAGE_WITH_IMAGE_CONDITIONING":
-                 dependentOptions.push(
-                     {
-                         name: "controlMode", type: OptionType.enum, enum: { "CANNY_EDGE": "CANNY_EDGE", "SEGMENTATION": "SEGMENTATION" },
-                         default: "CANNY_EDGE", description: "The control mode of the generated image"
-                     },
-                     { name: "controlStrength", type: OptionType.numeric, min: 0, max: 1, default: 0.7, description: "The control strength of the generated image" },
-                 );
-                 break;
-             case "COLOR_GUIDED_GENERATION":
-                 dependentOptions.push(
-                     { name: "colors", type: OptionType.string_list, value: [], description: "Hexadecimal color values to guide generation" },
-                 )
-                 break;
-             case "IMAGE_VARIATION":
-                 dependentOptions.push(
-                     { name: "similarityStrength", type: OptionType.numeric, min: 0.2, max: 1, default: 0.7, description: "The similarity strength of the generated image" },
-                 )
-                 break;
-             case "INPAINTING":
-                 //No changes
-                 break;
-             case "OUTPAINTING":
-                 dependentOptions.push(
-                     {
-                         name: "outPaintingMode", type: OptionType.enum, enum: { "DEFAULT": "DEFAULT", "PRECISE": "PRECISE" },
-                         default: "default", description: "The outpainting mode of the generated image"
-                     },
-                 )
-                 break;
-             case "BACKGROUND_REMOVAL":
-                 dependentOptions = [];
-                 otherOptions = [];
-                 break;
-         }
-
-         return {
-             _option_id: "bedrock-nova-canvas",
-             options: [
-                 tasktypeList,
-                 ...otherOptions,
-                 ...dependentOptions,
-             ]
-         };
-     } else {
-         const max_tokens_limit = getMaxTokensLimit(model, option);
-         //Not canvas, i.e normal AWS bedrock converse
-         const baseConverseOptions: ModelOptionInfoItem[] = [
-             {
-                 name: "max_tokens",
-                 type: OptionType.numeric,
-                 min: 1,
-                 max: max_tokens_limit,
-                 integer: true,
-                 step: 200,
-                 description: "The maximum number of tokens to generate",
-             },
-             {
-                 name: "temperature",
-                 type: OptionType.numeric,
-                 min: 0.0,
-                 default: 0.7,
-                 step: 0.1,
-                 description: "A higher temperature biases toward less likely tokens, making the model more creative"
-             },
-             {
-                 name: "top_p",
-                 type: OptionType.numeric,
-                 min: 0,
-                 max: 1,
-                 step: 0.1,
-                 description: "Limits token sampling to the cumulative probability of the top p tokens"
-             },
-             {
-                 name: "stop_sequence",
-                 type: OptionType.string_list,
-                 value: [],
-                 description: "The generation will halt if one of the stop sequences is output"
-             }];
-
-         if (model.includes("claude")) {
-             const claudeConverseOptions: ModelOptionInfoItem[] = [
-                 {
-                     name: "top_k",
-                     type: OptionType.numeric,
-                     min: 1,
-                     integer: true,
-                     step: 1,
-                     description: "Limits token sampling to the top k tokens"
-                 },
-             ];
-             if (model.includes("3-7")) {
-                 const claudeModeOptions: ModelOptionInfoItem[] = [
-                     {
-                         name: "thinking_mode",
-                         type: OptionType.boolean,
-                         default: false,
-                         description: "If true, use the extended reasoning mode"
-                     },
-                 ];
-                 const claudeThinkingOptions: ModelOptionInfoItem[] = (option as BedrockClaudeOptions)?.thinking_mode ? [
-                     {
-                         name: "thinking_budget_tokens",
-                         type: OptionType.numeric,
-                         min: 1024,
-                         default: 4000,
-                         integer: true,
-                         step: 100,
-                         description: "The target number of tokens to use for reasoning, not a hard limit."
-                     },
-                 ] : [];
-
-                 return {
-                     _option_id: "bedrock-claude",
-                     options: [
-                         ...baseConverseOptions,
-                         ...claudeConverseOptions,
-                         ...claudeModeOptions,
-                         ...claudeThinkingOptions]
-                 }
-             }
-             return {
-                 _option_id: "bedrock-claude",
-                 options: [...baseConverseOptions, ...claudeConverseOptions]
-             }
-         }
-         else if (model.includes("amazon")) {
-             //Titan models also exists but does not support any additional options
-             if (model.includes("nova")) {
-                 const novaConverseOptions: ModelOptionInfoItem[] = [
-                     {
-                         name: "top_k",
-                         type: OptionType.numeric,
-                         min: 1,
-                         integer: true,
-                         step: 1,
-                         description: "Limits token sampling to the top k tokens"
-                     },
-                 ];
-                 return {
-                     _option_id: "bedrock-nova",
-                     options: [...baseConverseOptions, ...novaConverseOptions]
-                 }
-             }
-         }
-         else if (model.includes("mistral")) {
-             //7b and 8x7b instruct
-             if (model.includes("7b")) {
-                 const mistralConverseOptions: ModelOptionInfoItem[] = [
-                     {
-                         name: "top_k",
-                         type: OptionType.numeric,
-                         min: 1,
-                         integer: true,
-                         step: 1,
-                         description: "Limits token sampling to the top k tokens"
-                     },
-                 ];
-                 return {
-                     _option_id: "bedrock-mistral",
-                     options: [...baseConverseOptions, ...mistralConverseOptions]
-                 }
-             }
-             //Other models such as Mistral Small, Large and Large 2
-             //Support no additional options
-         }
-         else if (model.includes("ai21")) {
-             const ai21ConverseOptions: ModelOptionInfoItem[] = [
-                 {
-                     name: "presence_penalty",
-                     type: OptionType.numeric,
-                     min: -2,
-                     max: 2,
-                     default: 0,
-                     step: 0.1,
-                     description: "A higher presence penalty encourages the model to talk about new topics"
-                 },
-                 {
-                     name: "frequency_penalty",
-                     type: OptionType.numeric,
-                     min: -2,
-                     max: 2,
-                     default: 0,
-                     step: 0.1,
-                     description: "A higher frequency penalty encourages the model to use less common words"
-                 },
-             ];
-
-             return {
-                 _option_id: "bedrock-ai21",
-                 options: [...baseConverseOptions, ...ai21ConverseOptions]
-             }
-         }
-         else if (model.includes("cohere.command")) {
-             const cohereCommandOptions: ModelOptionInfoItem[] = [
-                 {
-                     name: "top_k",
-                     type: OptionType.numeric,
-                     min: 1,
-                     integer: true,
-                     step: 1,
-                     description: "Limits token sampling to the top k tokens"
-                 },
-             ];
-             if (model.includes("command-r")) {
-                 const cohereCommandROptions: ModelOptionInfoItem[] = [
-                     {
-                         name: "frequency_penalty",
-                         type: OptionType.numeric,
-                         min: -2,
-                         max: 2,
-                         default: 0,
-                         step: 0.1,
-                         description: "A higher frequency penalty encourages the model to use less common words"
-                     },
-                     {
-                         name: "presence_penalty",
-                         type: OptionType.numeric,
-                         min: -2,
-                         max: 2,
-                         default: 0,
-                         step: 0.1,
-                         description: "A higher presence penalty encourages the model to talk about new topics"
-                     },
-                 ];
-                 return {
-                     _option_id: "bedrock-cohere-command",
-                     options: [...baseConverseOptions, ...cohereCommandOptions, ...cohereCommandROptions]
-                 }
-             }
-         }
-
-         //Fallback to converse standard.
-         return {
-             _option_id: "bedrock-converse",
-             options: baseConverseOptions
-         };
-     }
-     return textOptionsFallback;
- }
package/src/options/groq.ts
@@ -1,47 +0,0 @@
- import { ModelOptionsInfo, ModelOptionInfoItem, ModelOptions, OptionType, SharedOptions } from "../types.js";
- import { textOptionsFallback } from "../options.js";
-
- // Union type of all Bedrock options
- export type GroqOptions = GroqDeepseekThinkingOptions;
-
- export interface GroqDeepseekThinkingOptions {
-     _option_id: "groq-deepseek-thinking",
-     max_tokens?: number,
-     temperature?: number,
-     top_p?: number,
-     stop_sequence?: string[],
-     reasonsing_format: 'parsed' | 'raw' | 'hidden',
- }
-
- export function getGroqOptions(model: string, _option?: ModelOptions): ModelOptionsInfo {
-     if (model.includes("deepseek") && model.includes("r1")) {
-         const commonOptions: ModelOptionInfoItem[] = [
-             {
-                 name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: 131072,
-                 integer: true, description: "The maximum number of tokens to generate",
-             },
-             {
-                 name: SharedOptions.temperature, type: OptionType.numeric, min: 0.0, default: 0.7, max: 2.0,
-                 integer: false, step: 0.1, description: "A higher temperature biases toward less likely tokens, making the model more creative. A lower temperature than other models is recommended for deepseek R1, 0.3-0.7 approximately.",
-             },
-             {
-                 name: SharedOptions.top_p, type: OptionType.numeric, min: 0, max: 1,
-                 integer: false, step: 0.1, description: "Limits token sampling to the cumulative probability of the top p tokens",
-             },
-             {
-                 name: SharedOptions.stop_sequence, type: OptionType.string_list, value: [],
-                 description: "The generation will halt if one of the stop sequences is output",
-             },
-             {
-                 name: "reasoning_format", type: OptionType.enum, enum: { "Parsed": "parsed", "Raw": "raw", "Hidden": "hidden" },
-                 default: "parsed", description: "Controls how the reasoning is returned.",
-             },
-         ];
-
-         return {
-             _option_id: "groq-deepseek-thinking",
-             options: commonOptions,
-         };
-     }
-     return textOptionsFallback;
- }
package/src/options/openai.ts
@@ -1,148 +0,0 @@
- import { ModelOptionsInfo, ModelOptionInfoItem, ModelOptions, OptionType, SharedOptions } from "../types.js";
- import { textOptionsFallback } from "../options.js";
-
- // Union type of all Bedrock options
- export type OpenAiOptions = OpenAiThinkingOptions | OpenAiTextOptions;
-
- export interface OpenAiThinkingOptions {
-     _option_id: "openai-thinking",
-     max_tokens?: number,
-     stop_sequence?: string[],
-     reasoning_effort?: "low" | "medium" | "high",
-     image_detail?: "low" | "high" | "auto",
- }
-
- export interface OpenAiTextOptions {
-     _option_id: "openai-text",
-     max_tokens?: number,
-     temperature?: number,
-     top_p?: number,
-     presence_penalty?: number,
-     frequency_penalty?: number,
-     stop_sequence?: string[],
-     image_detail?: "low" | "high" | "auto",
- }
-
- export function getOpenAiOptions(model: string, _option?: ModelOptions): ModelOptionsInfo {
-     const visionOptions: ModelOptionInfoItem[] = isVisionModel(model) ? [
-         {
-             name: "image_detail", type: OptionType.enum, enum: { "Low": "low", "High": "high", "Auto": "auto" },
-             default: "auto", description: "Controls how the model processes an input image."
-         },
-     ] : [];
-
-     if (model.includes("o1") || model.includes("o3")) {
-         //Is thinking text model
-         let max_tokens_limit = 4096;
-         if (model.includes("o1")) {
-             if (model.includes("preview")) {
-                 max_tokens_limit = 32768;
-             }
-             else if (model.includes("mini")) {
-                 max_tokens_limit = 65536;
-             }
-             else {
-                 max_tokens_limit = 100000;
-             }
-         }
-         else if (model.includes("o3")) {
-             max_tokens_limit = 100000;
-         }
-
-         const commonOptions: ModelOptionInfoItem[] = [
-             {
-                 name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
-                 integer: true, description: "The maximum number of tokens to generate",
-             },
-             {
-                 name: SharedOptions.stop_sequence, type: OptionType.string_list, value: [],
-                 description: "The stop sequence of the generated image",
-             },
-         ];
-
-         const reasoningOptions: ModelOptionInfoItem[] = model.includes("o3") || isO1Full(model) ? [
-             {
-                 name: "reasoning_effort", type: OptionType.enum, enum: { "Low": "low", "Medium": "medium", "High": "high" },
-                 default: "medium", description: "How much effort the model should put into reasoning, lower values result in faster responses and less tokens used."
-             },
-         ] : [];
-
-         return {
-             _option_id: "openai-thinking",
-             options: [
-                 ...commonOptions,
-                 ...reasoningOptions,
-                 ...visionOptions,
-             ],
-         };
-     } else {
-         let max_tokens_limit = 4096;
-         if (model.includes("gpt-4o")) {
-             max_tokens_limit = 16384;
-             if (model.includes("gpt-4o-2024-05-13") || model.includes("realtime")) {
-                 max_tokens_limit = 4096;
-             }
-         }
-         else if (model.includes("gpt-4")) {
-             if (model.includes("turbo")) {
-                 max_tokens_limit = 4096;
-             } else {
-                 max_tokens_limit = 8192;
-             }
-         }
-         else if (model.includes("gpt-3-5")) {
-             max_tokens_limit = 4096;
-         }
-
-         //Is non-thinking text model
-         const commonOptions: ModelOptionInfoItem[] = [
-             {
-                 name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
-                 integer: true, step: 200, description: "The maximum number of tokens to generate",
-             },
-             {
-                 name: "temperature", type: OptionType.numeric, min: 0.0, max: 2.0, default: 0.7,
-                 integer: false, step: 0.1, description: "A higher temperature biases toward less likely tokens, making the model more creative"
-             },
-             {
-                 name: "top_p", type: OptionType.numeric, min: 0, max: 1,
-                 integer: false, step: 0.1, description: "Limits token sampling to the cumulative probability of the top p tokens"
-             },
-             {
-                 name: "presence_penalty", type: OptionType.numeric, min: -2.0, max: 2.0,
-                 integer: false, step: 0.1, description: "Penalise tokens if they appear at least once in the text"
-             },
-             {
-                 name: "frequency_penalty", type: OptionType.numeric, min: -2.0, max: 2.0,
-                 integer: false, step: 0.1, description: "Penalise tokens based on their frequency in the text"
-             },
-             {
-                 name: SharedOptions.stop_sequence, type: OptionType.string_list, value: [],
-                 description: "The generation will halt if one of the stop sequences is output",
-             }
-         ]
-
-         return {
-             _option_id: "openai-text",
-             options: [
-                 ...commonOptions,
-                 ...visionOptions,
-             ],
-         }
-     }
-     return textOptionsFallback;
- }
-
- function isO1Full(model: string): boolean {
-     if (model.includes("o1")) {
-         if (model.includes("mini") || model.includes("preview")) {
-             return false;
-         }
-         return true;
-     }
-     return false;
- }
-
- function isVisionModel(model: string): boolean {
-     return model.includes("gpt-4o") || isO1Full(model) || model.includes("gpt-4-turbo");
- }