@llumiverse/common 0.21.0 → 0.22.0-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/lib/cjs/capability/azure_foundry.js +1 -0
  2. package/lib/cjs/capability/azure_foundry.js.map +1 -1
  3. package/lib/cjs/capability/bedrock.js +5 -2
  4. package/lib/cjs/capability/bedrock.js.map +1 -1
  5. package/lib/cjs/capability/openai.js +2 -0
  6. package/lib/cjs/capability/openai.js.map +1 -1
  7. package/lib/cjs/capability/vertexai.js +1 -0
  8. package/lib/cjs/capability/vertexai.js.map +1 -1
  9. package/lib/cjs/capability.js +7 -0
  10. package/lib/cjs/capability.js.map +1 -1
  11. package/lib/cjs/options/azure_foundry.js +3 -0
  12. package/lib/cjs/options/azure_foundry.js.map +1 -1
  13. package/lib/cjs/options/bedrock.js +26 -0
  14. package/lib/cjs/options/bedrock.js.map +1 -1
  15. package/lib/cjs/options/openai.js +3 -0
  16. package/lib/cjs/options/openai.js.map +1 -1
  17. package/lib/cjs/options/vertexai.js +49 -2
  18. package/lib/cjs/options/vertexai.js.map +1 -1
  19. package/lib/cjs/types.js +52 -1
  20. package/lib/cjs/types.js.map +1 -1
  21. package/lib/esm/capability/azure_foundry.js +1 -0
  22. package/lib/esm/capability/azure_foundry.js.map +1 -1
  23. package/lib/esm/capability/bedrock.js +5 -2
  24. package/lib/esm/capability/bedrock.js.map +1 -1
  25. package/lib/esm/capability/openai.js +2 -0
  26. package/lib/esm/capability/openai.js.map +1 -1
  27. package/lib/esm/capability/vertexai.js +1 -0
  28. package/lib/esm/capability/vertexai.js.map +1 -1
  29. package/lib/esm/capability.js +7 -0
  30. package/lib/esm/capability.js.map +1 -1
  31. package/lib/esm/options/azure_foundry.js +3 -0
  32. package/lib/esm/options/azure_foundry.js.map +1 -1
  33. package/lib/esm/options/bedrock.js +26 -0
  34. package/lib/esm/options/bedrock.js.map +1 -1
  35. package/lib/esm/options/openai.js +3 -0
  36. package/lib/esm/options/openai.js.map +1 -1
  37. package/lib/esm/options/vertexai.js +49 -2
  38. package/lib/esm/options/vertexai.js.map +1 -1
  39. package/lib/esm/types.js +49 -1
  40. package/lib/esm/types.js.map +1 -1
  41. package/lib/tsconfig.tsbuildinfo +1 -1
  42. package/lib/types/capability/azure_foundry.d.ts.map +1 -1
  43. package/lib/types/capability/bedrock.d.ts.map +1 -1
  44. package/lib/types/capability/openai.d.ts.map +1 -1
  45. package/lib/types/capability/vertexai.d.ts.map +1 -1
  46. package/lib/types/capability.d.ts.map +1 -1
  47. package/lib/types/options/azure_foundry.d.ts.map +1 -1
  48. package/lib/types/options/bedrock.d.ts +8 -2
  49. package/lib/types/options/bedrock.d.ts.map +1 -1
  50. package/lib/types/options/openai.d.ts.map +1 -1
  51. package/lib/types/options/vertexai.d.ts.map +1 -1
  52. package/lib/types/types.d.ts +38 -10
  53. package/lib/types/types.d.ts.map +1 -1
  54. package/package.json +5 -5
  55. package/src/capability/azure_foundry.ts +1 -0
  56. package/src/capability/bedrock.ts +5 -2
  57. package/src/capability/openai.ts +2 -0
  58. package/src/capability/vertexai.ts +1 -0
  59. package/src/capability.ts +7 -0
  60. package/src/options/azure_foundry.ts +3 -0
  61. package/src/options/bedrock.ts +36 -2
  62. package/src/options/openai.ts +3 -0
  63. package/src/options/vertexai.ts +53 -6
  64. package/src/types.ts +81 -18
@@ -95,6 +95,9 @@ export function getMaxTokensLimitAzureFoundry(model: string): number | undefined
95
95
  if (modelLower.includes("gpt-35") || modelLower.includes("gpt-3.5")) {
96
96
  return 4096;
97
97
  }
98
+ if (modelLower.includes("gpt-5")) {
99
+ return 128000;
100
+ }
98
101
  // O-series models
99
102
  if (modelLower.includes("o1")) {
100
103
  if (modelLower.includes("preview")) {
@@ -2,7 +2,7 @@ import { ModelOptionsInfo, ModelOptions, OptionType, ModelOptionInfoItem } from
2
2
  import { textOptionsFallback } from "./fallback.js";
3
3
 
4
4
  // Union type of all Bedrock options
5
- export type BedrockOptions = NovaCanvasOptions | BaseConverseOptions | BedrockClaudeOptions | BedrockPalmyraOptions;
5
+ export type BedrockOptions = NovaCanvasOptions | BaseConverseOptions | BedrockClaudeOptions | BedrockPalmyraOptions | BedrockGptOssOptions;
6
6
 
7
7
  export interface NovaCanvasOptions {
8
8
  _option_id: "bedrock-nova-canvas"
@@ -21,7 +21,7 @@ export interface NovaCanvasOptions {
21
21
  }
22
22
 
23
23
  export interface BaseConverseOptions {
24
- _option_id: "bedrock-converse" | "bedrock-claude" | "bedrock-nova" | "bedrock-mistral" | "bedrock-ai21" | "bedrock-cohere-command" | "bedrock-palmyra";
24
+ _option_id: "bedrock-converse" | "bedrock-claude" | "bedrock-nova" | "bedrock-mistral" | "bedrock-ai21" | "bedrock-cohere-command" | "bedrock-palmyra" | "bedrock-gpt-oss";
25
25
  max_tokens?: number;
26
26
  temperature?: number;
27
27
  top_p?: number;
@@ -44,6 +44,13 @@ export interface BedrockPalmyraOptions extends BaseConverseOptions {
44
44
  presence_penalty?: number;
45
45
  }
46
46
 
47
+ export interface BedrockGptOssOptions extends BaseConverseOptions {
48
+ _option_id: "bedrock-gpt-oss";
49
+ reasoning_effort?: "low" | "medium" | "high";
50
+ frequency_penalty?: number;
51
+ presence_penalty?: number;
52
+ }
53
+
47
54
  export function getMaxTokensLimitBedrock(model: string): number | undefined {
48
55
  // Claude models
49
56
  if (model.includes("claude")) {
@@ -124,6 +131,10 @@ export function getMaxTokensLimitBedrock(model: string): number | undefined {
124
131
  return 8192;
125
132
  }
126
133
  }
134
+ // OpenAI gpt-oss models
135
+ if (model.includes("gpt-oss")) {
136
+ return 128000;
137
+ }
127
138
 
128
139
  // Default fallback
129
140
  return undefined;
@@ -442,6 +453,29 @@ export function getBedrockOptions(model: string, option?: ModelOptions): ModelOp
442
453
  options: [...baseConverseOptions, ...palmyraConverseOptions]
443
454
  }
444
455
  }
456
+ else if (model.includes("gpt-oss")) {
457
+ const gptOssOptions: ModelOptionInfoItem[] = [
458
+ {
459
+ name: "reasoning_effort",
460
+ type: OptionType.enum,
461
+ enum: {
462
+ "low": "low",
463
+ "medium": "medium",
464
+ "high": "high"
465
+ },
466
+ default: "medium",
467
+ description: "The reasoning effort of the model, which affects the quality and speed of the response"
468
+ },
469
+ ];
470
+
471
+ const baseConverseOptionsNoStop: ModelOptionInfoItem[] = [...baseConverseOptions];
472
+ // Remove stop_sequence for gpt-oss (guard against -1: splice(-1, 1) would drop the last option)
+ const stopIdx = baseConverseOptionsNoStop.findIndex(o => o.name === "stop_sequence");
+ if (stopIdx !== -1) baseConverseOptionsNoStop.splice(stopIdx, 1);
474
+ return {
475
+ _option_id: "bedrock-gpt-oss",
476
+ options: [...baseConverseOptionsNoStop, ...gptOssOptions]
477
+ };
478
+ }
445
479
 
446
480
  //Fallback to converse standard.
447
481
  return {
@@ -93,6 +93,9 @@ export function getOpenAiOptions(model: string, _option?: ModelOptions): ModelOp
93
93
  else if (model.includes("gpt-3-5")) {
94
94
  max_tokens_limit = 4096;
95
95
  }
96
+ else if (model.includes("gpt-5")) {
97
+ max_tokens_limit = 128000;
98
+ }
96
99
 
97
100
  //Is non-thinking text model
98
101
  const commonOptions: ModelOptionInfoItem[] = [
@@ -245,9 +245,53 @@ function getImagenOptions(model: string, option?: ModelOptions): ModelOptionsInf
245
245
  }
246
246
 
247
247
  function getGeminiOptions(model: string, _option?: ModelOptions): ModelOptionsInfo {
248
+ // Special handling for gemini-2.5-flash-image
249
+ if (model.includes("gemini-2.5-flash-image")) {
250
+ const options: ModelOptionInfoItem[] = [
251
+ {
252
+ name: SharedOptions.temperature,
253
+ type: OptionType.numeric,
254
+ min: 0.0,
255
+ max: 2.0,
256
+ default: 0.7,
257
+ step: 0.01,
258
+ description: "Sampling temperature"
259
+ },
260
+ {
261
+ name: SharedOptions.top_p,
262
+ type: OptionType.numeric,
263
+ min: 0.0,
264
+ max: 1.0,
265
+ step: 0.01,
266
+ description: "Nucleus sampling probability"
267
+ },
268
+ {
269
+ name: "candidate_count",
270
+ type: OptionType.numeric,
271
+ min: 1,
272
+ max: 8,
273
+ default: 1,
274
+ integer: true,
275
+ description: "Number of candidates to generate"
276
+ },
277
+ {
278
+ name: SharedOptions.max_tokens,
279
+ type: OptionType.numeric,
280
+ min: 1,
281
+ max: 32768,
282
+ integer: true,
283
+ step: 200,
284
+ description: "Maximum output tokens"
285
+ }
286
+ ];
287
+ return {
288
+ _option_id: "vertexai-gemini",
289
+ options
290
+ };
291
+ }
248
292
  const max_tokens_limit = getGeminiMaxTokensLimit(model);
249
293
  const excludeOptions = ["max_tokens"];
250
- let commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
294
+ const commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
251
295
 
252
296
  const max_tokens: ModelOptionInfoItem[] = [{
253
297
  name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
@@ -260,7 +304,7 @@ function getGeminiOptions(model: string, _option?: ModelOptions): ModelOptionsIn
260
304
 
261
305
  if (model.includes("-2.5-")) {
262
306
  // Gemini 2.5 thinking models
263
-
307
+
264
308
  // Set budget token ranges based on model variant
265
309
  let budgetMin = -1;
266
310
  let budgetMax = 24576;
@@ -287,7 +331,7 @@ function getGeminiOptions(model: string, _option?: ModelOptions): ModelOptionsIn
287
331
  "Range: 128-32768 tokens. " +
288
332
  "Cannot disable thinking - minimum 128 tokens. Set to -1 for dynamic thinking.";
289
333
  }
290
-
334
+
291
335
  const geminiThinkingOptions: ModelOptionInfoItem[] = [
292
336
  {
293
337
  name: "include_thoughts",
@@ -331,7 +375,7 @@ function getGeminiOptions(model: string, _option?: ModelOptions): ModelOptionsIn
331
375
  function getClaudeOptions(model: string, option?: ModelOptions): ModelOptionsInfo {
332
376
  const max_tokens_limit = getClaudeMaxTokensLimit(model);
333
377
  const excludeOptions = ["max_tokens", "presence_penalty", "frequency_penalty"];
334
- let commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
378
+ const commonOptions = textOptionsFallback.options.filter((option) => !excludeOptions.includes(option.name));
335
379
  const max_tokens: ModelOptionInfoItem[] = [{
336
380
  name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
337
381
  integer: true, step: 200, description: "The maximum number of tokens to generate"
@@ -391,7 +435,7 @@ function getLlamaOptions(model: string): ModelOptionsInfo {
391
435
  name: SharedOptions.max_tokens, type: OptionType.numeric, min: 1, max: max_tokens_limit,
392
436
  integer: true, step: 200, description: "The maximum number of tokens to generate"
393
437
  }];
394
-
438
+
395
439
  // Set max temperature to 1.0 for Llama models
396
440
  commonOptions = commonOptions.map((option) => {
397
441
  if (
@@ -416,6 +460,9 @@ function getLlamaOptions(model: string): ModelOptionsInfo {
416
460
  }
417
461
 
418
462
  function getGeminiMaxTokensLimit(model: string): number {
463
+ if (model.includes("gemini-2.5-flash-image")) {
464
+ return 32768;
465
+ }
419
466
  if (model.includes("thinking") || model.includes("-2.5-")) {
420
467
  return 65536;
421
468
  }
@@ -427,7 +474,7 @@ function getGeminiMaxTokensLimit(model: string): number {
427
474
 
428
475
  function getClaudeMaxTokensLimit(model: string): number {
429
476
  if (model.includes("-4-")) {
430
- if(model.includes("opus-")) {
477
+ if (model.includes("opus-")) {
431
478
  return 32768;
432
479
  }
433
480
  return 65536;
package/src/types.ts CHANGED
@@ -64,7 +64,7 @@ export const ProviderList: Record<Providers, ProviderParams> = {
64
64
  replicate:
65
65
  {
66
66
  id: Providers.replicate,
67
- name: "Repicate",
67
+ name: "Replicate",
68
68
  requiresApiKey: true,
69
69
  requiresEndpointUrl: false,
70
70
  supportSearch: true,
@@ -152,20 +152,90 @@ export interface EmbeddingsResult {
152
152
  export interface ResultValidationError {
153
153
  code: 'validation_error' | 'json_error' | 'content_policy_violation';
154
154
  message: string;
155
- data?: string;
155
+ data?: CompletionResult[];
156
+ }
157
+
158
+ // ============== Result Types ===============
159
+
160
+ export interface BaseResult {
161
+ type: "text" | "json" | "image";
162
+ value: any;
163
+ }
164
+
165
+ export interface TextResult extends BaseResult {
166
+ type: "text";
167
+ value: string;
168
+ }
169
+
170
+ export interface JsonResult extends BaseResult {
171
+ type: "json";
172
+ }
173
+
174
+ export interface ImageResult extends BaseResult {
175
+ type: "image";
176
+ value: string; // base64 data url or real url
177
+ }
178
+
179
+ export type CompletionResult = TextResult | JsonResult | ImageResult;
180
+
181
+ /**
182
+ * Output as string
183
+ */
184
+ export function completionResultToString(result: CompletionResult): string {
185
+ switch (result.type) {
186
+ case "text":
187
+ return result.value;
188
+ case "json":
189
+ return JSON.stringify(result.value, null, 2);
190
+ case "image":
191
+ return result.value;
192
+ }
193
+ }
194
+
195
+ /**
196
+ * Output as JSON, only handles the first JSON result or tries to parse text as JSON
197
+ * Expects the text to be pure JSON if no JSON result is found
198
+ * Throws if no JSON result is found or if parsing fails
199
+ */
200
+ export function parseCompletionResultsToJson(results: CompletionResult[]): any {
201
+ const jsonResults = results.filter(r => r.type === "json");
202
+ if (jsonResults.length >= 1) {
203
+ return jsonResults[0].value;
204
+ //TODO: Handle multiple json type results
205
+ }
206
+
207
+ const textResults = results.filter(r => r.type === "text").map(r => r.value).join("");
208
+ if (textResults.length === 0) {
209
+ throw new Error("No JSON result found or failed to parse text");
210
+ }
211
+ try {
212
+ return JSON.parse(textResults);
213
+ }
214
+ catch {
215
+ throw new Error("No JSON result found or failed to parse text");
216
+ }
217
+ }
218
+
219
+ /**
220
+ * Output as JSON if possible, otherwise as concatenated text
221
+ * Joins text results with the specified separator, default is empty string
222
+ * If multiple JSON results are found only the first one is returned
223
+ */
224
+ export function parseCompletionResults(result: CompletionResult[], separator: string = ""): any {
225
+ try {
226
+ return parseCompletionResultsToJson(result);
227
+ } catch {
228
+ return result.map(completionResultToString).join(separator);
229
+ }
156
230
  }
157
231
 
158
- //ResultT should be either JSONObject or string
159
232
  //Internal structure used in driver implementation.
160
- export interface CompletionChunkObject<ResultT = any> {
161
- result: ResultT;
233
+ export interface CompletionChunkObject {
234
+ result: CompletionResult[];
162
235
  token_usage?: ExecutionTokenUsage;
163
236
  finish_reason?: "stop" | "length" | string;
164
237
  }
165
238
 
166
- //Internal structure used in driver implementation.
167
- export type CompletionChunk = CompletionChunkObject | string;
168
-
169
239
  export interface ToolDefinition {
170
240
  name: string,
171
241
  description?: string,
@@ -185,10 +255,9 @@ export interface ToolUse<ParamsT = JSONObject> {
185
255
  tool_input: ParamsT | null
186
256
  }
187
257
 
188
- //ResultT should be either JSONObject or string
189
- export interface Completion<ResultT = any> {
258
+ export interface Completion {
190
259
  // the driver impl must return the result and optionally the token_usage. the execution time is computed by the extended abstract driver
191
- result: ResultT;
260
+ result: CompletionResult[];
192
261
  token_usage?: ExecutionTokenUsage;
193
262
  /**
194
263
  * Contains the tools from which the model awaits information.
@@ -216,12 +285,6 @@ export interface Completion<ResultT = any> {
216
285
  conversation?: unknown;
217
286
  }
218
287
 
219
- export interface ImageGeneration {
220
-
221
- images?: string[];
222
-
223
- }
224
-
225
288
  export interface ExecutionResponse<PromptT = any> extends Completion {
226
289
  prompt: PromptT;
227
290
  /**
@@ -533,7 +596,7 @@ export interface TrainingOptions {
533
596
 
534
597
  export interface TrainingPromptOptions {
535
598
  segments: PromptSegment[];
536
- completion: string | JSONObject;
599
+ completion: CompletionResult[];
537
600
  model: string; // the model to train
538
601
  schema?: JSONSchema; // the result schema f any
539
602
  }